Skip to main content

edgefirst_image/
lib.rs

1// SPDX-FileCopyrightText: Copyright 2025 Au-Zone Technologies
2// SPDX-License-Identifier: Apache-2.0
3
4/*!
5
6## EdgeFirst HAL - Image Converter
7
8The `edgefirst_image` crate is part of the EdgeFirst Hardware Abstraction
9Layer (HAL) and provides functionality for converting images between
10different formats and sizes.  The crate is designed to work with hardware
11acceleration when available, but also provides a CPU-based fallback for
12environments where hardware acceleration is not present or not suitable.
13
14The main features of the `edgefirst_image` crate include:
15- Support for various image formats, including YUYV, RGB, RGBA, and GREY.
16- Support for source crop, destination crop, rotation, and flipping.
17- Image conversion using hardware acceleration (G2D, OpenGL) when available.
18- CPU-based image conversion as a fallback option.
19
20The crate defines a `TensorImage` struct that represents an image as a
21tensor, along with its format information. It also provides an
22`ImageProcessor` struct that manages the conversion process, selecting
23the appropriate conversion method based on the available hardware.
24
25## Examples
26
27```rust
28# use edgefirst_image::{ImageProcessor, TensorImage, RGBA, RGB, Rotation, Flip, Crop, ImageProcessorTrait};
29# fn main() -> Result<(), edgefirst_image::Error> {
30let image = include_bytes!("../../../testdata/zidane.jpg");
31let img = TensorImage::load(image, Some(RGBA), None)?;
32let mut converter = ImageProcessor::new()?;
33let mut dst = TensorImage::new(640, 480, RGB, None)?;
34converter.convert(&img, &mut dst, Rotation::None, Flip::None, Crop::default())?;
35# Ok(())
36# }
37```
38
39## Environment Variables
40The behavior of the `edgefirst_image::ImageProcessor` struct can be influenced by the
41following environment variables:
42- `EDGEFIRST_DISABLE_GL`: If set to `1`, disables the use of OpenGL for image
43  conversion, forcing the use of CPU or other available hardware methods.
44- `EDGEFIRST_DISABLE_G2D`: If set to `1`, disables the use of G2D for image
45  conversion, forcing the use of CPU or other available hardware methods.
46- `EDGEFIRST_DISABLE_CPU`: If set to `1`, disables the use of CPU for image
47  conversion, forcing the use of hardware acceleration methods. If no hardware
48  acceleration methods are available, an error will be returned when attempting
49  to create an `ImageProcessor`.
50
Additionally, the `TensorMemory` used for default allocations can be controlled using the
`EDGEFIRST_TENSOR_FORCE_MEM` environment variable. If set to `1`, default tensor memory
uses system memory. This disables the use of specialized memory regions for tensors and
hardware acceleration, but it improves the performance of the CPU converter.
55*/
56#![cfg_attr(coverage_nightly, feature(coverage_attribute))]
57
58use edgefirst_decoder::{DetectBox, ProtoData, Segmentation};
59use edgefirst_tensor::{Tensor, TensorMemory, TensorTrait as _};
60use enum_dispatch::enum_dispatch;
61use four_char_code::{four_char_code, FourCharCode};
62use std::{fmt::Display, time::Instant};
63use zune_jpeg::{
64    zune_core::{colorspace::ColorSpace, options::DecoderOptions},
65    JpegDecoder,
66};
67use zune_png::PngDecoder;
68
69pub use cpu::CPUProcessor;
70pub use error::{Error, Result};
71#[cfg(target_os = "linux")]
72pub use g2d::G2DProcessor;
73#[cfg(target_os = "linux")]
74#[cfg(feature = "opengl")]
75pub use opengl_headless::GLProcessorThreaded;
76#[cfg(target_os = "linux")]
77#[cfg(feature = "opengl")]
78pub use opengl_headless::Int8InterpolationMode;
79#[cfg(target_os = "linux")]
80#[cfg(feature = "opengl")]
81pub use opengl_headless::{probe_egl_displays, EglDisplayInfo, EglDisplayKind};
82
/// Result of rendering a single per-instance grayscale mask.
///
/// Contains the bounding-box region in output image coordinates and the
/// raw uint8 pixel data (RED channel only, 0–255 representing sigmoid output).
#[derive(Debug, Clone)]
pub(crate) struct MaskResult {
    /// X offset of the bbox region in the output image.
    pub(crate) x: usize,
    /// Y offset of the bbox region in the output image.
    pub(crate) y: usize,
    /// Width of the bbox region.
    pub(crate) w: usize,
    /// Height of the bbox region.
    pub(crate) h: usize,
    /// Grayscale pixel data (`w * h` bytes, row-major, one byte per pixel).
    pub(crate) pixels: Vec<u8>,
}
100
/// Region metadata for a single detection within a compact mask atlas.
///
/// The atlas packs padded bounding-box strips vertically, stacked
/// top-to-bottom.  This struct records where each detection's strip lives
/// in the atlas (`atlas_y_offset` is its first row) and how it maps back
/// to the original output coordinate space.
#[must_use]
#[derive(Debug, Clone, Copy)]
pub struct MaskRegion {
    /// Row offset of this detection's strip in the atlas.
    pub atlas_y_offset: usize,
    /// Left edge of the padded bbox in output image coordinates.
    pub padded_x: usize,
    /// Top edge of the padded bbox in output image coordinates.
    pub padded_y: usize,
    /// Width of the padded bbox.
    pub padded_w: usize,
    /// Height of the padded bbox (= number of atlas rows for this strip).
    pub padded_h: usize,
    /// Original (unpadded) bbox left edge in output image coordinates.
    pub bbox_x: usize,
    /// Original (unpadded) bbox top edge in output image coordinates.
    pub bbox_y: usize,
    /// Original (unpadded) bbox width.
    pub bbox_w: usize,
    /// Original (unpadded) bbox height.
    pub bbox_h: usize,
}
128
129mod cpu;
130mod error;
131mod g2d;
132mod opengl_headless;
133
/// 8 bit interleaved YUV422, limited range
pub const YUYV: FourCharCode = four_char_code!("YUYV");
/// 8 bit interleaved YUV422 (VYUY byte order), limited range
pub const VYUY: FourCharCode = four_char_code!("VYUY");
/// 8 bit semi-planar YUV420 (Y plane + interleaved UV plane), limited range
pub const NV12: FourCharCode = four_char_code!("NV12");
/// 8 bit semi-planar YUV422 (Y plane + interleaved UV plane), limited range
pub const NV16: FourCharCode = four_char_code!("NV16");
/// 8 bit RGBA
pub const RGBA: FourCharCode = four_char_code!("RGBA");
/// 8 bit RGB (note the FourCC is `"RGB "` with a trailing space)
pub const RGB: FourCharCode = four_char_code!("RGB ");
/// 8 bit grayscale, full range (FourCC `"Y800"`)
pub const GREY: FourCharCode = four_char_code!("Y800");

// TODO: planar RGB is 8BPS? https://fourcc.org/8bps/
/// 8 bit planar (channels-first) RGB; provisional FourCC, see TODO above.
pub const PLANAR_RGB: FourCharCode = four_char_code!("8BPS");

// TODO: What fourcc code is planar RGBA?
/// 8 bit planar (channels-first) RGBA; provisional FourCC, see TODO above.
pub const PLANAR_RGBA: FourCharCode = four_char_code!("8BPA");

/// Packed RGB with uint8→int8 XOR 0x80 reinterpretation.
/// The underlying bytes are uint8 with MSB flipped; when cast to i8, values
/// map correctly: uint8 0 → int8 -128, uint8 128 → int8 0, uint8 255 → int8 127.
pub const RGB_INT8: FourCharCode = four_char_code!("RGBi");

/// Planar RGB (channels-first) with uint8→int8 XOR 0x80 reinterpretation.
/// The underlying bytes are uint8 with MSB flipped; when cast to i8, values
/// map correctly: uint8 0 → int8 -128, uint8 128 → int8 0, uint8 255 → int8 127.
/// Tensor shape is `[3, H, W]` (channels-first).
pub const PLANAR_RGB_INT8: FourCharCode = four_char_code!("8BPi");
165
/// An image represented as a tensor with associated format information.
#[derive(Debug)]
pub struct TensorImage {
    /// Backing pixel storage; its shape depends on `fourcc` (see `from_tensor`).
    tensor: Tensor<u8>,
    /// FourCC pixel-format tag (e.g. `RGB`, `RGBA`, `NV12`).
    fourcc: FourCharCode,
    /// Whether the layout is channels-first (planar) rather than interleaved.
    is_planar: bool,
}
173
174impl TensorImage {
175    /// Creates a new `TensorImage` with the specified width, height, format,
176    /// and memory type.
177    ///
178    /// # Examples
179    /// ```rust
180    /// use edgefirst_image::{RGB, TensorImage};
181    /// use edgefirst_tensor::TensorMemory;
182    /// # fn main() -> Result<(), edgefirst_image::Error> {
183    /// let img = TensorImage::new(640, 480, RGB, Some(TensorMemory::Mem))?;
184    /// assert_eq!(img.width(), 640);
185    /// assert_eq!(img.height(), 480);
186    /// assert_eq!(img.fourcc(), RGB);
187    /// assert!(!img.is_planar());
188    /// # Ok(())
189    /// # }
190    /// ```
191    pub fn new(
192        width: usize,
193        height: usize,
194        fourcc: FourCharCode,
195        memory: Option<TensorMemory>,
196    ) -> Result<Self> {
197        let channels = fourcc_channels(fourcc)?;
198        let is_planar = fourcc_planar(fourcc)?;
199
200        // NV12 is semi-planar with Y plane (W×H) + UV plane (W×H/2)
201        // Total bytes = W × H × 1.5. Use shape [H*3/2, W] to encode this.
202        if fourcc == NV12 {
203            let shape = vec![height * 3 / 2, width];
204            let tensor = Tensor::new(&shape, memory, None)?;
205
206            return Ok(Self {
207                tensor,
208                fourcc,
209                is_planar,
210            });
211        }
212
213        // NV16 is semi-planar with Y plane (W×H) + UV plane (W×H)
214        // Total bytes = W × H × 2. Use shape [H*2, W] to encode this.
215        if fourcc == NV16 {
216            let shape = vec![height * 2, width];
217            let tensor = Tensor::new(&shape, memory, None)?;
218
219            return Ok(Self {
220                tensor,
221                fourcc,
222                is_planar,
223            });
224        }
225
226        if is_planar {
227            let shape = vec![channels, height, width];
228            let tensor = Tensor::new(&shape, memory, None)?;
229
230            return Ok(Self {
231                tensor,
232                fourcc,
233                is_planar,
234            });
235        }
236
237        let shape = vec![height, width, channels];
238        let tensor = Tensor::new(&shape, memory, None)?;
239
240        Ok(Self {
241            tensor,
242            fourcc,
243            is_planar,
244        })
245    }
246
247    /// Creates a new `TensorImage` from an existing tensor and specified
248    /// format.
249    ///
250    /// The required tensor shape depends on the pixel format:
251    ///
252    /// | Format | Shape | Description |
253    /// |--------|-------|-------------|
254    /// | `RGB`  | `[H, W, 3]` | 3-channel interleaved |
255    /// | `RGBA` | `[H, W, 4]` | 4-channel interleaved |
256    /// | `GREY` | `[H, W, 1]` | Single-channel grayscale |
257    /// | `YUYV` | `[H, W, 2]` | YUV 4:2:2 interleaved |
258    /// | `PLANAR_RGB`  | `[3, H, W]` | Channels-first (3 planes) |
259    /// | `PLANAR_RGBA` | `[4, H, W]` | Channels-first (4 planes) |
260    /// | `RGB_INT8` | `[H, W, 3]` | Packed RGB, int8 via XOR 0x80 |
261    /// | `PLANAR_RGB_INT8` | `[3, H, W]` | Planar RGB, int8 via XOR 0x80 |
262    /// | `NV12` | `[H*3/2, W]` | Semi-planar YUV 4:2:0 (2D) |
263    /// | `NV16` | `[H*2, W]`   | Semi-planar YUV 4:2:2 (2D) |
264    ///
265    /// Most formats use a 3D tensor where the channel dimension matches
266    /// the format's channel count. The semi-planar formats NV12 and NV16
267    /// are special: the Y and UV planes have different heights, so the
268    /// data cannot be described as `[H, W, C]`. Instead the contiguous
269    /// memory is represented as a 2D tensor whose first dimension encodes
270    /// the total byte height (Y rows + UV rows).
271    ///
272    /// # Examples
273    ///
274    /// RGB (3D interleaved):
275    /// ```rust
276    /// use edgefirst_image::{RGB, TensorImage};
277    /// use edgefirst_tensor::Tensor;
278    ///  # fn main() -> Result<(), edgefirst_image::Error> {
279    /// let tensor = Tensor::new(&[720, 1280, 3], None, None)?;
280    /// let img = TensorImage::from_tensor(tensor, RGB)?;
281    /// assert_eq!(img.width(), 1280);
282    /// assert_eq!(img.height(), 720);
283    /// assert_eq!(img.fourcc(), RGB);
284    /// # Ok(())
285    /// # }
286    /// ```
287    ///
288    /// GREY (3D with 1 channel):
289    /// ```rust
290    /// use edgefirst_image::{GREY, TensorImage};
291    /// use edgefirst_tensor::Tensor;
292    ///  # fn main() -> Result<(), edgefirst_image::Error> {
293    /// let tensor = Tensor::new(&[480, 640, 1], None, None)?;
294    /// let img = TensorImage::from_tensor(tensor, GREY)?;
295    /// assert_eq!(img.width(), 640);
296    /// assert_eq!(img.height(), 480);
297    /// # Ok(())
298    /// # }
299    /// ```
300    ///
301    /// NV12 (2D semi-planar, height*3/2 rows):
302    /// ```rust
303    /// use edgefirst_image::{NV12, TensorImage};
304    /// use edgefirst_tensor::Tensor;
305    ///  # fn main() -> Result<(), edgefirst_image::Error> {
306    /// // 1080p NV12: 1080 Y rows + 540 UV rows = 1620 total rows
307    /// let tensor = Tensor::new(&[1620, 1920], None, None)?;
308    /// let img = TensorImage::from_tensor(tensor, NV12)?;
309    /// assert_eq!(img.width(), 1920);
310    /// assert_eq!(img.height(), 1080);
311    /// # Ok(())
312    /// # }
313    /// ```
314    pub fn from_tensor(tensor: Tensor<u8>, fourcc: FourCharCode) -> Result<Self> {
315        let shape = tensor.shape();
316        let is_planar = fourcc_planar(fourcc)?;
317
318        // NV12/NV16 use 2D shape [H*3/2, W] or [H*2, W] respectively
319        if fourcc == NV12 || fourcc == NV16 {
320            if shape.len() != 2 {
321                return Err(Error::InvalidShape(format!(
322                    "Semi-planar format {} requires 2D tensor, got {}: {:?}",
323                    fourcc.to_string(),
324                    shape.len(),
325                    shape
326                )));
327            }
328            return Ok(Self {
329                tensor,
330                fourcc,
331                is_planar,
332            });
333        }
334
335        // All other formats use 3D shape
336        if shape.len() != 3 {
337            return Err(Error::InvalidShape(format!(
338                "Tensor shape must have 3 dimensions, got {}: {:?}",
339                shape.len(),
340                shape
341            )));
342        }
343        let channels = if is_planar { shape[0] } else { shape[2] };
344
345        if fourcc_channels(fourcc)? != channels {
346            return Err(Error::InvalidShape(format!(
347                "Invalid tensor shape {:?} for format {}",
348                shape,
349                fourcc.to_string()
350            )));
351        }
352
353        Ok(Self {
354            tensor,
355            fourcc,
356            is_planar,
357        })
358    }
359
360    /// Loads an image from the given byte slice, attempting to decode it as
361    /// JPEG or PNG format. Exif orientation is supported. The default format is
362    /// RGB.
363    ///
364    /// # Examples
365    /// ```rust
366    /// use edgefirst_image::{RGBA, TensorImage};
367    /// use edgefirst_tensor::TensorMemory;
368    /// # fn main() -> Result<(), edgefirst_image::Error> {
369    /// let jpeg_bytes = include_bytes!("../../../testdata/zidane.png");
370    /// let img = TensorImage::load(jpeg_bytes, Some(RGBA), Some(TensorMemory::Mem))?;
371    /// assert_eq!(img.width(), 1280);
372    /// assert_eq!(img.height(), 720);
373    /// assert_eq!(img.fourcc(), RGBA);
374    /// # Ok(())
375    /// # }
376    /// ```
377    pub fn load(
378        image: &[u8],
379        format: Option<FourCharCode>,
380        memory: Option<TensorMemory>,
381    ) -> Result<Self> {
382        if let Ok(i) = Self::load_jpeg(image, format, memory) {
383            return Ok(i);
384        }
385        if let Ok(i) = Self::load_png(image, format, memory) {
386            return Ok(i);
387        }
388
389        Err(Error::NotSupported(
390            "Could not decode as jpeg or png".to_string(),
391        ))
392    }
393
    /// Loads a JPEG image from the given byte slice. Supports EXIF orientation.
    /// The default format is RGB.
    ///
    /// Only RGB, RGBA, and GREY output formats are supported; any other
    /// requested format returns `Error::NotSupported`.
    ///
    /// # Examples
    /// ```rust
    /// use edgefirst_image::{RGB, TensorImage};
    /// use edgefirst_tensor::TensorMemory;
    /// # fn main() -> Result<(), edgefirst_image::Error> {
    /// let jpeg_bytes = include_bytes!("../../../testdata/zidane.jpg");
    /// let img = TensorImage::load_jpeg(jpeg_bytes, Some(RGB), Some(TensorMemory::Mem))?;
    /// assert_eq!(img.width(), 1280);
    /// assert_eq!(img.height(), 720);
    /// assert_eq!(img.fourcc(), RGB);
    /// # Ok(())
    /// # }
    /// ```
    pub fn load_jpeg(
        image: &[u8],
        format: Option<FourCharCode>,
        memory: Option<TensorMemory>,
    ) -> Result<Self> {
        // Map the requested FourCC onto the decoder's output colorspace.
        let colour = match format {
            Some(RGB) => ColorSpace::RGB,
            Some(RGBA) => ColorSpace::RGBA,
            Some(GREY) => ColorSpace::Luma,
            None => ColorSpace::RGB,
            Some(f) => {
                return Err(Error::NotSupported(format!(
                    "Unsupported image format {}",
                    f.display()
                )));
            }
        };
        let options = DecoderOptions::default().jpeg_set_out_colorspace(colour);
        let mut decoder = JpegDecoder::new_with_options(image, options);
        // Parse headers first so dimensions and EXIF are available before
        // allocating the destination tensor.
        decoder.decode_headers()?;

        let image_info = decoder.info().ok_or(Error::Internal(
            "JPEG did not return decoded image info".to_string(),
        ))?;

        let converted_color_space = decoder
            .get_output_colorspace()
            .ok_or(Error::Internal("No output colorspace".to_string()))?;

        // Translate the decoder's actual output colorspace back to a FourCC.
        // The decoder may not honour the requested colorspace exactly.
        let converted_color_space = match converted_color_space {
            ColorSpace::RGB => RGB,
            ColorSpace::RGBA => RGBA,
            ColorSpace::Luma => GREY,
            _ => {
                return Err(Error::NotSupported(
                    "Unsupported JPEG decoder output".to_string(),
                ));
            }
        };

        let dest_format = format.unwrap_or(converted_color_space);

        // EXIF orientation determines any post-decode rotation/flip.
        let (rotation, flip) = decoder
            .exif()
            .map(|x| Self::read_exif_orientation(x))
            .unwrap_or((Rotation::None, Flip::None));

        // Fast path: no orientation fix-up needed, decode straight into the
        // caller-visible image (converting format via a staging buffer only
        // when the decoder output differs from the requested format).
        if (rotation, flip) == (Rotation::None, Flip::None) {
            let mut img = Self::new(
                image_info.width as usize,
                image_info.height as usize,
                dest_format,
                memory,
            )?;

            if converted_color_space != dest_format {
                // Decode into a system-memory staging image, then convert
                // formats on the CPU into the destination.
                let tmp = Self::new(
                    image_info.width as usize,
                    image_info.height as usize,
                    converted_color_space,
                    Some(TensorMemory::Mem),
                )?;

                decoder.decode_into(&mut tmp.tensor.map()?)?;

                CPUProcessor::convert_format(&tmp, &mut img)?;
                return Ok(img);
            }
            decoder.decode_into(&mut img.tensor.map()?)?;
            return Ok(img);
        }

        // Orientation fix-up required: decode (and convert, if needed) into a
        // system-memory staging image, then rotate/flip into the final image
        // allocated with the caller's requested memory type.
        let mut tmp = Self::new(
            image_info.width as usize,
            image_info.height as usize,
            dest_format,
            Some(TensorMemory::Mem),
        )?;

        if converted_color_space != dest_format {
            let tmp2 = Self::new(
                image_info.width as usize,
                image_info.height as usize,
                converted_color_space,
                Some(TensorMemory::Mem),
            )?;

            decoder.decode_into(&mut tmp2.tensor.map()?)?;

            CPUProcessor::convert_format(&tmp2, &mut tmp)?;
        } else {
            decoder.decode_into(&mut tmp.tensor.map()?)?;
        }

        rotate_flip_to_tensor_image(&tmp, rotation, flip, memory)
    }
506
507    /// Loads a PNG image from the given byte slice. Supports EXIF orientation.
508    /// The default format is RGB.
509    ///
510    /// # Examples
511    /// ```rust
512    /// use edgefirst_image::{RGB, TensorImage};
513    /// use edgefirst_tensor::TensorMemory;
514    /// # fn main() -> Result<(), edgefirst_image::Error> {
515    /// let png_bytes = include_bytes!("../../../testdata/zidane.png");
516    /// let img = TensorImage::load_png(png_bytes, Some(RGB), Some(TensorMemory::Mem))?;
517    /// assert_eq!(img.width(), 1280);
518    /// assert_eq!(img.height(), 720);
519    /// assert_eq!(img.fourcc(), RGB);
520    /// # Ok(())
521    /// # }
522    /// ```
523    pub fn load_png(
524        image: &[u8],
525        format: Option<FourCharCode>,
526        memory: Option<TensorMemory>,
527    ) -> Result<Self> {
528        let format = format.unwrap_or(RGB);
529        let alpha = match format {
530            RGB => false,
531            RGBA => true,
532            _ => {
533                return Err(Error::NotImplemented(
534                    "Unsupported image format".to_string(),
535                ));
536            }
537        };
538
539        let options = DecoderOptions::default()
540            .png_set_add_alpha_channel(alpha)
541            .png_set_decode_animated(false);
542        let mut decoder = PngDecoder::new_with_options(image, options);
543        decoder.decode_headers()?;
544        let image_info = decoder.get_info().ok_or(Error::Internal(
545            "PNG did not return decoded image info".to_string(),
546        ))?;
547
548        let (rotation, flip) = image_info
549            .exif
550            .as_ref()
551            .map(|x| Self::read_exif_orientation(x))
552            .unwrap_or((Rotation::None, Flip::None));
553
554        if (rotation, flip) == (Rotation::None, Flip::None) {
555            let img = Self::new(image_info.width, image_info.height, format, memory)?;
556            decoder.decode_into(&mut img.tensor.map()?)?;
557            return Ok(img);
558        }
559
560        let tmp = Self::new(
561            image_info.width,
562            image_info.height,
563            format,
564            Some(TensorMemory::Mem),
565        )?;
566        decoder.decode_into(&mut tmp.tensor.map()?)?;
567
568        rotate_flip_to_tensor_image(&tmp, rotation, flip, memory)
569    }
570
571    fn read_exif_orientation(exif_: &[u8]) -> (Rotation, Flip) {
572        let exifreader = exif::Reader::new();
573        let Ok(exif_) = exifreader.read_raw(exif_.to_vec()) else {
574            return (Rotation::None, Flip::None);
575        };
576        let Some(orientation) = exif_.get_field(exif::Tag::Orientation, exif::In::PRIMARY) else {
577            return (Rotation::None, Flip::None);
578        };
579        match orientation.value.get_uint(0) {
580            Some(1) => (Rotation::None, Flip::None),
581            Some(2) => (Rotation::None, Flip::Horizontal),
582            Some(3) => (Rotation::Rotate180, Flip::None),
583            Some(4) => (Rotation::Rotate180, Flip::Horizontal),
584            Some(5) => (Rotation::Clockwise90, Flip::Horizontal),
585            Some(6) => (Rotation::Clockwise90, Flip::None),
586            Some(7) => (Rotation::CounterClockwise90, Flip::Horizontal),
587            Some(8) => (Rotation::CounterClockwise90, Flip::None),
588            Some(v) => {
589                log::warn!("broken orientation EXIF value: {v}");
590                (Rotation::None, Flip::None)
591            }
592            None => (Rotation::None, Flip::None),
593        }
594    }
595
596    /// Saves the image as a JPEG file at the specified path with the given
597    /// quality. Only RGB and RGBA formats are supported.
598    ///
599    /// # Examples
600    /// ```rust
601    /// use edgefirst_image::{RGB, TensorImage};
602    /// use edgefirst_tensor::Tensor;
603    ///  # fn main() -> Result<(), edgefirst_image::Error> {
604    /// let tensor = Tensor::new(&[720, 1280, 3], None, None)?;
605    /// let img = TensorImage::from_tensor(tensor, RGB)?;
606    /// let save_path = "/tmp/output.jpg";
607    /// img.save_jpeg(save_path, 90)?;
608    /// # Ok(())
609    /// # }
610    pub fn save_jpeg(&self, path: &str, quality: u8) -> Result<()> {
611        if self.is_planar {
612            return Err(Error::NotImplemented(
613                "Saving planar images is not supported".to_string(),
614            ));
615        }
616
617        let colour = if self.fourcc == RGB {
618            jpeg_encoder::ColorType::Rgb
619        } else if self.fourcc == RGBA {
620            jpeg_encoder::ColorType::Rgba
621        } else {
622            return Err(Error::NotImplemented(
623                "Unsupported image format for saving".to_string(),
624            ));
625        };
626
627        let encoder = jpeg_encoder::Encoder::new_file(path, quality)?;
628        let tensor_map = self.tensor.map()?;
629
630        encoder.encode(
631            &tensor_map,
632            self.width() as u16,
633            self.height() as u16,
634            colour,
635        )?;
636
637        Ok(())
638    }
639
    /// Returns a reference to the underlying tensor.
    ///
    /// # Examples
    /// ```rust
    /// use edgefirst_image::{RGB, TensorImage};
    /// use edgefirst_tensor::{Tensor, TensorTrait};
    ///  # fn main() -> Result<(), edgefirst_image::Error> {
    /// let tensor = Tensor::new(&[720, 1280, 3], None, Some("Tensor"))?;
    /// let img = TensorImage::from_tensor(tensor, RGB)?;
    /// let underlying_tensor = img.tensor();
    /// assert_eq!(underlying_tensor.name(), "Tensor");
    /// # Ok(())
    /// # }
    /// ```
    pub fn tensor(&self) -> &Tensor<u8> {
        &self.tensor
    }
656
    /// Returns the FourCC code representing the image format.
    ///
    /// # Examples
    /// ```rust
    /// use edgefirst_image::{RGB, TensorImage};
    /// use edgefirst_tensor::{Tensor, TensorTrait};
    ///  # fn main() -> Result<(), edgefirst_image::Error> {
    /// let tensor = Tensor::new(&[720, 1280, 3], None, Some("Tensor"))?;
    /// let img = TensorImage::from_tensor(tensor, RGB)?;
    /// assert_eq!(img.fourcc(), RGB);
    /// # Ok(())
    /// # }
    /// ```
    pub fn fourcc(&self) -> FourCharCode {
        self.fourcc
    }
672
    /// Override the FourCC format tag without touching the underlying tensor.
    /// Used internally for int8 ↔ uint8 format aliasing where the pixel layout
    /// is identical and only the interpretation differs.
    ///
    /// No validation is performed here; callers must ensure the new tag
    /// describes the same byte layout as the existing tensor.
    pub(crate) fn set_fourcc(&mut self, fourcc: FourCharCode) {
        self.fourcc = fourcc;
    }
679
    /// Returns whether the image uses a planar (channels-first) layout.
    ///
    /// # Examples
    /// ```rust
    /// use edgefirst_image::{RGB, TensorImage};
    /// use edgefirst_tensor::{Tensor, TensorTrait};
    ///  # fn main() -> Result<(), edgefirst_image::Error> {
    /// let tensor = Tensor::new(&[720, 1280, 3], None, Some("Tensor"))?;
    /// let img = TensorImage::from_tensor(tensor, RGB)?;
    /// assert!(!img.is_planar());
    /// # Ok(())
    /// # }
    /// ```
    pub fn is_planar(&self) -> bool {
        self.is_planar
    }
693
    /// Returns the image width in pixels.
    ///
    /// # Examples
    /// ```rust
    /// use edgefirst_image::{RGB, TensorImage};
    /// use edgefirst_tensor::{Tensor, TensorTrait};
    ///  # fn main() -> Result<(), edgefirst_image::Error> {
    /// let tensor = Tensor::new(&[720, 1280, 3], None, Some("Tensor"))?;
    /// let img = TensorImage::from_tensor(tensor, RGB)?;
    /// assert_eq!(img.width(), 1280);
    /// # Ok(())
    /// # }
    /// ```
    pub fn width(&self) -> usize {
        // NV12/NV16 use 2D shape [H*k, W]
        if self.fourcc == NV12 || self.fourcc == NV16 {
            return self.tensor.shape()[1];
        }
        match self.is_planar {
            // Planar layouts are [C, H, W]
            true => self.tensor.shape()[2],
            // Interleaved layouts are [H, W, C]
            false => self.tensor.shape()[1],
        }
    }
714
    /// Returns the image height in pixels.
    ///
    /// # Examples
    /// ```rust
    /// use edgefirst_image::{RGB, TensorImage};
    /// use edgefirst_tensor::{Tensor, TensorTrait};
    ///  # fn main() -> Result<(), edgefirst_image::Error> {
    /// let tensor = Tensor::new(&[720, 1280, 3], None, Some("Tensor"))?;
    /// let img = TensorImage::from_tensor(tensor, RGB)?;
    /// assert_eq!(img.height(), 720);
    /// # Ok(())
    /// # }
    /// ```
    pub fn height(&self) -> usize {
        // NV12 uses shape [H*3/2, W], so height = shape[0] * 2 / 3
        if self.fourcc == NV12 {
            return self.tensor.shape()[0] * 2 / 3;
        }
        // NV16 uses shape [H*2, W], so height = shape[0] / 2
        if self.fourcc == NV16 {
            return self.tensor.shape()[0] / 2;
        }
        match self.is_planar {
            // Planar layouts are [C, H, W]
            true => self.tensor.shape()[1],
            // Interleaved layouts are [H, W, C]
            false => self.tensor.shape()[0],
        }
    }
739
    /// Returns the number of channels (components) in the image.
    ///
    /// # Examples
    /// ```rust
    /// use edgefirst_image::{RGB, TensorImage};
    /// use edgefirst_tensor::{Tensor, TensorTrait};
    ///  # fn main() -> Result<(), edgefirst_image::Error> {
    /// let tensor = Tensor::new(&[720, 1280, 3], None, Some("Tensor"))?;
    /// let img = TensorImage::from_tensor(tensor, RGB)?;
    /// assert_eq!(img.channels(), 3);
    /// # Ok(())
    /// # }
    /// ```
    pub fn channels(&self) -> usize {
        // NV12/NV16 use 2D shape, conceptually have 2 components (Y + interleaved UV)
        if self.fourcc == NV12 || self.fourcc == NV16 {
            return 2;
        }
        match self.is_planar {
            // Planar layouts are [C, H, W]
            true => self.tensor.shape()[0],
            // Interleaved layouts are [H, W, C]
            false => self.tensor.shape()[2],
        }
    }
760
    /// Returns the row stride in bytes for a single image row.
    ///
    /// # Examples
    /// ```rust
    /// use edgefirst_image::{RGB, TensorImage};
    /// use edgefirst_tensor::{Tensor, TensorTrait};
    ///  # fn main() -> Result<(), edgefirst_image::Error> {
    /// let tensor = Tensor::new(&[720, 1280, 3], None, Some("Tensor"))?;
    /// let img = TensorImage::from_tensor(tensor, RGB)?;
    /// assert_eq!(img.row_stride(), 1280*3);
    /// # Ok(())
    /// # }
    /// ```
    pub fn row_stride(&self) -> usize {
        // NOTE(review): NV12/NV16 are not special-cased here and take the
        // `is_planar` branch below — verify fourcc_planar() classifies them
        // such that the result matches their Y-plane stride (W bytes).
        match self.is_planar {
            // Planar: one byte per pixel per plane row.
            true => self.width(),
            // Interleaved: all channels packed per pixel.
            false => self.width() * self.channels(),
        }
    }
777
    /// Returns the buffer identity of the underlying tensor.
    /// Delegates directly to the tensor's own `buffer_identity`.
    pub fn buffer_identity(&self) -> &edgefirst_tensor::BufferIdentity {
        self.tensor.buffer_identity()
    }
782}
783
/// Trait for types that can be used as destination images for conversion.
///
/// This trait abstracts over the difference between owned (`TensorImage`) and
/// borrowed (`TensorImageRef`) image buffers, enabling the same conversion code
/// to work with both. The accessor methods mirror the inherent methods on
/// `TensorImage`.
pub trait TensorImageDst {
    /// Returns a reference to the underlying tensor.
    fn tensor(&self) -> &Tensor<u8>;
    /// Returns a mutable reference to the underlying tensor.
    fn tensor_mut(&mut self) -> &mut Tensor<u8>;
    /// Returns the FourCC code representing the image format.
    fn fourcc(&self) -> FourCharCode;
    /// Returns whether the image is in planar format.
    fn is_planar(&self) -> bool;
    /// Returns the width of the image in pixels.
    fn width(&self) -> usize;
    /// Returns the height of the image in pixels.
    fn height(&self) -> usize;
    /// Returns the number of channels in the image.
    fn channels(&self) -> usize;
    /// Returns the row stride in bytes.
    fn row_stride(&self) -> usize;
    /// Returns the buffer identity of the underlying tensor.
    fn buffer_identity(&self) -> &edgefirst_tensor::BufferIdentity;
}
809
// `TensorImageDst` for the owned image type: every method forwards to the
// inherent `TensorImage` implementation (fully-qualified to make the
// delegation explicit and avoid any trait-method recursion).
impl TensorImageDst for TensorImage {
    fn tensor(&self) -> &Tensor<u8> {
        &self.tensor
    }

    fn tensor_mut(&mut self) -> &mut Tensor<u8> {
        &mut self.tensor
    }

    fn fourcc(&self) -> FourCharCode {
        self.fourcc
    }

    fn is_planar(&self) -> bool {
        self.is_planar
    }

    fn width(&self) -> usize {
        TensorImage::width(self)
    }

    fn height(&self) -> usize {
        TensorImage::height(self)
    }

    fn channels(&self) -> usize {
        TensorImage::channels(self)
    }

    fn row_stride(&self) -> usize {
        TensorImage::row_stride(self)
    }

    fn buffer_identity(&self) -> &edgefirst_tensor::BufferIdentity {
        TensorImage::buffer_identity(self)
    }
}
847
/// A borrowed view of an image tensor for zero-copy preprocessing.
///
/// `TensorImageRef` wraps a borrowed `&mut Tensor<u8>` instead of owning it,
/// enabling zero-copy operations where the HAL writes directly into an external
/// tensor (e.g., a model's pre-allocated input buffer).
///
/// # Examples
/// ```rust,ignore
/// // Create a borrowed tensor image wrapping the model's input tensor
/// let mut dst = TensorImageRef::from_borrowed_tensor(
///     model.input_tensor(0),
///     PLANAR_RGB,
/// )?;
///
/// // Preprocess directly into the model's input buffer
/// processor.convert(&src_image, &mut dst, Rotation::None, Flip::None, Crop::default())?;
///
/// // Run inference - no copy needed!
/// model.run()?;
/// ```
#[derive(Debug)]
pub struct TensorImageRef<'a> {
    /// Borrowed destination storage; the conversion routines write into it.
    pub(crate) tensor: &'a mut Tensor<u8>,
    /// Pixel format of the wrapped buffer.
    fourcc: FourCharCode,
    /// Cached result of `fourcc_planar(fourcc)`, computed at construction.
    is_planar: bool,
}
874
875impl<'a> TensorImageRef<'a> {
876    /// Creates a `TensorImageRef` from a borrowed tensor reference.
877    ///
878    /// The tensor shape must match the expected format:
879    /// - For planar formats (e.g., PLANAR_RGB): shape is `[channels, height,
880    ///   width]`
881    /// - For interleaved formats (e.g., RGB, RGBA): shape is `[height, width,
882    ///   channels]`
883    ///
884    /// # Arguments
885    /// * `tensor` - A mutable reference to the tensor to wrap
886    /// * `fourcc` - The pixel format of the image
887    ///
888    /// # Returns
889    /// A `Result` containing the `TensorImageRef` or an error if the tensor
890    /// shape doesn't match the expected format.
891    pub fn from_borrowed_tensor(tensor: &'a mut Tensor<u8>, fourcc: FourCharCode) -> Result<Self> {
892        let shape = tensor.shape();
893        let is_planar = fourcc_planar(fourcc)?;
894
895        // NV12/NV16 use 2D shape [H*3/2, W] or [H*2, W] respectively
896        if fourcc == NV12 || fourcc == NV16 {
897            if shape.len() != 2 {
898                return Err(Error::InvalidShape(format!(
899                    "Semi-planar format {} requires 2D tensor, got {}: {:?}",
900                    fourcc.to_string(),
901                    shape.len(),
902                    shape
903                )));
904            }
905            return Ok(Self {
906                tensor,
907                fourcc,
908                is_planar,
909            });
910        }
911
912        // All other formats use 3D shape
913        if shape.len() != 3 {
914            return Err(Error::InvalidShape(format!(
915                "Tensor shape must have 3 dimensions, got {}: {:?}",
916                shape.len(),
917                shape
918            )));
919        }
920        let channels = if is_planar { shape[0] } else { shape[2] };
921
922        if fourcc_channels(fourcc)? != channels {
923            return Err(Error::InvalidShape(format!(
924                "Invalid tensor shape {:?} for format {}",
925                shape,
926                fourcc.to_string()
927            )));
928        }
929
930        Ok(Self {
931            tensor,
932            fourcc,
933            is_planar,
934        })
935    }
936
937    /// Returns a reference to the underlying tensor.
938    pub fn tensor(&self) -> &Tensor<u8> {
939        self.tensor
940    }
941
942    /// Returns the FourCC code representing the image format.
943    pub fn fourcc(&self) -> FourCharCode {
944        self.fourcc
945    }
946
947    /// Returns whether the image is in planar format.
948    pub fn is_planar(&self) -> bool {
949        self.is_planar
950    }
951
952    /// Returns the width of the image in pixels.
953    pub fn width(&self) -> usize {
954        match self.is_planar {
955            true => self.tensor.shape()[2],
956            false => self.tensor.shape()[1],
957        }
958    }
959
960    /// Returns the height of the image in pixels.
961    pub fn height(&self) -> usize {
962        match self.is_planar {
963            true => self.tensor.shape()[1],
964            false => self.tensor.shape()[0],
965        }
966    }
967
968    /// Returns the number of channels in the image.
969    pub fn channels(&self) -> usize {
970        match self.is_planar {
971            true => self.tensor.shape()[0],
972            false => self.tensor.shape()[2],
973        }
974    }
975
976    /// Returns the row stride in bytes.
977    pub fn row_stride(&self) -> usize {
978        match self.is_planar {
979            true => self.width(),
980            false => self.width() * self.channels(),
981        }
982    }
983}
984
impl TensorImageDst for TensorImageRef<'_> {
    // Pure delegation to the inherent `TensorImageRef` accessors. The
    // fully-qualified form (`TensorImageRef::width(self)`) is deliberate for
    // methods that share a name with the trait: it makes explicit that the
    // inherent method — not this trait method — is being called, avoiding any
    // risk of accidental self-recursion.
    fn tensor(&self) -> &Tensor<u8> {
        self.tensor
    }

    fn tensor_mut(&mut self) -> &mut Tensor<u8> {
        self.tensor
    }

    fn fourcc(&self) -> FourCharCode {
        self.fourcc
    }

    fn is_planar(&self) -> bool {
        self.is_planar
    }

    fn width(&self) -> usize {
        TensorImageRef::width(self)
    }

    fn height(&self) -> usize {
        TensorImageRef::height(self)
    }

    fn channels(&self) -> usize {
        TensorImageRef::channels(self)
    }

    fn row_stride(&self) -> usize {
        TensorImageRef::row_stride(self)
    }

    fn buffer_identity(&self) -> &edgefirst_tensor::BufferIdentity {
        self.tensor.buffer_identity()
    }
}
1022
1023/// Flips the image, and the rotates it.
1024fn rotate_flip_to_tensor_image(
1025    src: &TensorImage,
1026    rotation: Rotation,
1027    flip: Flip,
1028    memory: Option<TensorMemory>,
1029) -> Result<TensorImage, Error> {
1030    let src_map = src.tensor.map()?;
1031    let dst = match rotation {
1032        Rotation::None | Rotation::Rotate180 => {
1033            TensorImage::new(src.width(), src.height(), src.fourcc(), memory)?
1034        }
1035        Rotation::Clockwise90 | Rotation::CounterClockwise90 => {
1036            TensorImage::new(src.height(), src.width(), src.fourcc(), memory)?
1037        }
1038    };
1039
1040    let mut dst_map = dst.tensor.map()?;
1041
1042    CPUProcessor::flip_rotate_ndarray(&src_map, &mut dst_map, &dst, rotation, flip)?;
1043
1044    Ok(dst)
1045}
1046
/// Rotation applied to an image, in clockwise 90-degree steps.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Rotation {
    None = 0,
    Clockwise90 = 1,
    Rotate180 = 2,
    CounterClockwise90 = 3,
}
impl Rotation {
    /// Creates a Rotation enum from an angle in degrees. The angle must be a
    /// multiple of 90.
    ///
    /// # Panics
    /// Panics if the angle is not a multiple of 90.
    ///
    /// # Examples
    /// ```rust
    /// # use edgefirst_image::Rotation;
    /// let rotation = Rotation::from_degrees_clockwise(270);
    /// assert_eq!(rotation, Rotation::CounterClockwise90);
    /// ```
    pub fn from_degrees_clockwise(angle: usize) -> Rotation {
        // Reduce to a single turn; `angle` is unsigned, so `%` and
        // `rem_euclid` agree here.
        let turn = angle % 360;
        if turn == 0 {
            Rotation::None
        } else if turn == 90 {
            Rotation::Clockwise90
        } else if turn == 180 {
            Rotation::Rotate180
        } else if turn == 270 {
            Rotation::CounterClockwise90
        } else {
            panic!("rotation angle is not a multiple of 90")
        }
    }
}
1077
/// Mirror operation applied to an image during conversion.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Flip {
    /// No flipping.
    None = 0,
    /// Flip vertically.
    Vertical = 1,
    /// Flip horizontally.
    Horizontal = 2,
}
1084
/// Crop parameters for a conversion: an optional source region, an optional
/// destination region, and an optional fill color.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct Crop {
    /// Region of the source image to read from; `None` means the full image.
    pub src_rect: Option<Rect>,
    /// Region of the destination image to write to; `None` means the full
    /// image.
    pub dst_rect: Option<Rect>,
    /// Fill color for destination areas outside the cropped region.
    /// NOTE(review): assumed to be RGBA channel order — confirm against the
    /// backend converters.
    pub dst_color: Option<[u8; 4]>,
}
1091
1092impl Default for Crop {
1093    fn default() -> Self {
1094        Crop::new()
1095    }
1096}
1097impl Crop {
1098    // Creates a new Crop with default values (no cropping).
1099    pub fn new() -> Self {
1100        Crop {
1101            src_rect: None,
1102            dst_rect: None,
1103            dst_color: None,
1104        }
1105    }
1106
1107    // Sets the source rectangle for cropping.
1108    pub fn with_src_rect(mut self, src_rect: Option<Rect>) -> Self {
1109        self.src_rect = src_rect;
1110        self
1111    }
1112
1113    // Sets the destination rectangle for cropping.
1114    pub fn with_dst_rect(mut self, dst_rect: Option<Rect>) -> Self {
1115        self.dst_rect = dst_rect;
1116        self
1117    }
1118
1119    // Sets the destination color for areas outside the cropped region.
1120    pub fn with_dst_color(mut self, dst_color: Option<[u8; 4]>) -> Self {
1121        self.dst_color = dst_color;
1122        self
1123    }
1124
1125    // Creates a new Crop with no cropping.
1126    pub fn no_crop() -> Self {
1127        Crop::new()
1128    }
1129
1130    // Checks if the crop rectangles are valid for the given source and
1131    // destination images.
1132    pub fn check_crop(&self, src: &TensorImage, dst: &TensorImage) -> Result<(), Error> {
1133        let src = self.src_rect.is_none_or(|x| x.check_rect(src));
1134        let dst = self.dst_rect.is_none_or(|x| x.check_rect(dst));
1135        match (src, dst) {
1136            (true, true) => Ok(()),
1137            (true, false) => Err(Error::CropInvalid(format!(
1138                "Dest crop invalid: {:?}",
1139                self.dst_rect
1140            ))),
1141            (false, true) => Err(Error::CropInvalid(format!(
1142                "Src crop invalid: {:?}",
1143                self.src_rect
1144            ))),
1145            (false, false) => Err(Error::CropInvalid(format!(
1146                "Dest and Src crop invalid: {:?} {:?}",
1147                self.dst_rect, self.src_rect
1148            ))),
1149        }
1150    }
1151
1152    // Checks if the crop rectangles are valid for the given source and
1153    // destination images (using TensorImageRef for destination).
1154    pub fn check_crop_ref(&self, src: &TensorImage, dst: &TensorImageRef<'_>) -> Result<(), Error> {
1155        let src = self.src_rect.is_none_or(|x| x.check_rect(src));
1156        let dst = self.dst_rect.is_none_or(|x| x.check_rect_dst(dst));
1157        match (src, dst) {
1158            (true, true) => Ok(()),
1159            (true, false) => Err(Error::CropInvalid(format!(
1160                "Dest crop invalid: {:?}",
1161                self.dst_rect
1162            ))),
1163            (false, true) => Err(Error::CropInvalid(format!(
1164                "Src crop invalid: {:?}",
1165                self.src_rect
1166            ))),
1167            (false, false) => Err(Error::CropInvalid(format!(
1168                "Dest and Src crop invalid: {:?} {:?}",
1169                self.dst_rect, self.src_rect
1170            ))),
1171        }
1172    }
1173}
1174
/// An axis-aligned rectangle in pixel coordinates, measured from the image's
/// top-left corner.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct Rect {
    /// Horizontal offset of the left edge, in pixels.
    pub left: usize,
    /// Vertical offset of the top edge, in pixels.
    pub top: usize,
    /// Width in pixels.
    pub width: usize,
    /// Height in pixels.
    pub height: usize,
}
1182
1183impl Rect {
1184    // Creates a new Rect with the specified left, top, width, and height.
1185    pub fn new(left: usize, top: usize, width: usize, height: usize) -> Self {
1186        Self {
1187            left,
1188            top,
1189            width,
1190            height,
1191        }
1192    }
1193
1194    // Checks if the rectangle is valid for the given image.
1195    pub fn check_rect(&self, image: &TensorImage) -> bool {
1196        self.left + self.width <= image.width() && self.top + self.height <= image.height()
1197    }
1198
1199    // Checks if the rectangle is valid for the given destination image.
1200    pub fn check_rect_dst<D: TensorImageDst>(&self, image: &D) -> bool {
1201        self.left + self.width <= image.width() && self.top + self.height <= image.height()
1202    }
1203}
1204
#[enum_dispatch(ImageProcessor)]
pub trait ImageProcessorTrait {
    /// Converts the source image to the destination image format and size. The
    /// image is cropped first, then flipped, then rotated.
    ///
    /// # Arguments
    ///
    /// * `src` - The source image to convert from.
    /// * `dst` - The destination image to be converted to.
    /// * `rotation` - The rotation to apply to the destination image.
    /// * `flip` - The flip to apply to the image.
    /// * `crop` - An optional rectangle specifying the area to crop from the
    ///   source image.
    ///
    /// # Returns
    ///
    /// A `Result` indicating success or failure of the conversion.
    fn convert(
        &mut self,
        src: &TensorImage,
        dst: &mut TensorImage,
        rotation: Rotation,
        flip: Flip,
        crop: Crop,
    ) -> Result<()>;

    /// Converts the source image to a borrowed destination tensor for zero-copy
    /// preprocessing.
    ///
    /// This variant accepts a `TensorImageRef` as the destination, enabling
    /// direct writes into external buffers (e.g., model input tensors) without
    /// intermediate copies.
    ///
    /// # Arguments
    ///
    /// * `src` - The source image to convert from.
    /// * `dst` - A borrowed tensor image wrapping the destination buffer.
    /// * `rotation` - The rotation to apply to the destination image.
    /// * `flip` - The flip to apply to the image.
    /// * `crop` - An optional rectangle specifying the area to crop from the
    ///   source image.
    ///
    /// # Returns
    ///
    /// A `Result` indicating success or failure of the conversion.
    fn convert_ref(
        &mut self,
        src: &TensorImage,
        dst: &mut TensorImageRef<'_>,
        rotation: Rotation,
        flip: Flip,
        crop: Crop,
    ) -> Result<()>;

    /// Draws pre-decoded segmentation masks onto the destination image.
    fn draw_masks(
        &mut self,
        dst: &mut TensorImage,
        detect: &[DetectBox],
        segmentation: &[Segmentation],
    ) -> Result<()>;

    /// Draw masks from proto data onto image (fused decode+draw).
    ///
    /// For YOLO segmentation models, this avoids materializing intermediate
    /// `Array3<u8>` masks. The `ProtoData` contains mask coefficients and the
    /// prototype tensor; the renderer computes `mask_coeff @ protos` directly.
    fn draw_masks_proto(
        &mut self,
        dst: &mut TensorImage,
        detect: &[DetectBox],
        proto_data: &ProtoData,
    ) -> Result<()>;

    /// Decode masks to atlas buffer (internal, used by decode_masks).
    ///
    /// The atlas is a compact vertical strip where each detection occupies a
    /// strip sized to its padded bounding box (not the full output resolution).
    ///
    /// Returns `(atlas_pixels, regions)` where `regions` describes each
    /// detection's location and bbox within the atlas.
    fn decode_masks_atlas(
        &mut self,
        detect: &[DetectBox],
        proto_data: ProtoData,
        output_width: usize,
        output_height: usize,
    ) -> Result<(Vec<u8>, Vec<MaskRegion>)>;

    /// Sets the colors used for rendering segmentation masks. Up to 17 colors
    /// can be set.
    fn set_class_colors(&mut self, colors: &[[u8; 4]]) -> Result<()>;
}
1298
/// Configuration for [`ImageProcessor`] construction.
///
/// Use with [`ImageProcessor::with_config`] to override the default EGL
/// display auto-detection. The default configuration (all fields `None`)
/// preserves the existing auto-detection behaviour, so
/// [`ImageProcessor::new`] and `with_config(Default::default())` are
/// equivalent.
#[derive(Debug, Clone, Default)]
pub struct ImageProcessorConfig {
    /// Force OpenGL to use this EGL display type instead of auto-detecting.
    ///
    /// When `None`, the processor probes displays in priority order: GBM,
    /// PlatformDevice, Default. Use [`probe_egl_displays`] to discover
    /// which displays are available on the current system.
    ///
    /// Ignored when `EDGEFIRST_DISABLE_GL=1` is set.
    #[cfg(target_os = "linux")]
    #[cfg(feature = "opengl")]
    pub egl_display: Option<EglDisplayKind>,
}
1317
/// Image converter that uses available hardware acceleration or CPU as a
/// fallback.
///
/// Backend preference is decided per call: `convert` tries G2D first, then —
/// for same-size conversions only — the CPU, then OpenGL, then the CPU again
/// as the final fallback.
#[derive(Debug)]
pub struct ImageProcessor {
    /// CPU-based image converter as a fallback. This is only None if the
    /// EDGEFIRST_DISABLE_CPU environment variable is set.
    pub cpu: Option<CPUProcessor>,

    #[cfg(target_os = "linux")]
    /// G2D-based image converter for Linux systems. This is only available if
    /// the EDGEFIRST_DISABLE_G2D environment variable is not set and libg2d.so
    /// is available.
    pub g2d: Option<G2DProcessor>,
    #[cfg(target_os = "linux")]
    #[cfg(feature = "opengl")]
    /// OpenGL-based image converter for Linux systems. This is only available
    /// if the EDGEFIRST_DISABLE_GL environment variable is not set and OpenGL
    /// ES is available.
    pub opengl: Option<GLProcessorThreaded>,
}
1338
// SAFETY: NOTE(review) — these impls assert that every backend held by
// `ImageProcessor` (CPU, G2D, OpenGL) may be moved to and shared across
// threads. That property cannot be verified from this file alone; GL/EGL
// state in particular is often thread-affine, so confirm each backend's
// thread-safety guarantees before relying on these impls.
unsafe impl Send for ImageProcessor {}
unsafe impl Sync for ImageProcessor {}
1341
1342impl ImageProcessor {
1343    /// Creates a new `ImageProcessor` instance, initializing available
1344    /// hardware converters based on the system capabilities and environment
1345    /// variables.
1346    ///
1347    /// # Examples
1348    /// ```rust
1349    /// # use edgefirst_image::{ImageProcessor, TensorImage, RGBA, RGB, Rotation, Flip, Crop, ImageProcessorTrait};
1350    /// # fn main() -> Result<(), edgefirst_image::Error> {
1351    /// let image = include_bytes!("../../../testdata/zidane.jpg");
1352    /// let img = TensorImage::load(image, Some(RGBA), None)?;
1353    /// let mut converter = ImageProcessor::new()?;
1354    /// let mut dst = TensorImage::new(640, 480, RGB, None)?;
1355    /// converter.convert(&img, &mut dst, Rotation::None, Flip::None, Crop::default())?;
1356    /// # Ok(())
1357    /// # }
1358    pub fn new() -> Result<Self> {
1359        Self::with_config(ImageProcessorConfig::default())
1360    }
1361
1362    /// Creates a new `ImageProcessor` with the given configuration.
1363    ///
1364    /// This allows overriding the EGL display type used for OpenGL
1365    /// acceleration. The `EDGEFIRST_DISABLE_GL=1` environment variable
1366    /// still takes precedence over any override.
1367    #[allow(unused_variables)]
1368    pub fn with_config(config: ImageProcessorConfig) -> Result<Self> {
1369        #[cfg(target_os = "linux")]
1370        let g2d = if std::env::var("EDGEFIRST_DISABLE_G2D")
1371            .map(|x| x != "0" && x.to_lowercase() != "false")
1372            .unwrap_or(false)
1373        {
1374            log::debug!("EDGEFIRST_DISABLE_G2D is set");
1375            None
1376        } else {
1377            match G2DProcessor::new() {
1378                Ok(g2d_converter) => Some(g2d_converter),
1379                Err(err) => {
1380                    log::warn!("Failed to initialize G2D converter: {err:?}");
1381                    None
1382                }
1383            }
1384        };
1385
1386        #[cfg(target_os = "linux")]
1387        #[cfg(feature = "opengl")]
1388        let opengl = if std::env::var("EDGEFIRST_DISABLE_GL")
1389            .map(|x| x != "0" && x.to_lowercase() != "false")
1390            .unwrap_or(false)
1391        {
1392            log::debug!("EDGEFIRST_DISABLE_GL is set");
1393            None
1394        } else {
1395            match GLProcessorThreaded::new(config.egl_display) {
1396                Ok(gl_converter) => Some(gl_converter),
1397                Err(err) => {
1398                    log::warn!("Failed to initialize GL converter: {err:?}");
1399                    None
1400                }
1401            }
1402        };
1403
1404        let cpu = if std::env::var("EDGEFIRST_DISABLE_CPU")
1405            .map(|x| x != "0" && x.to_lowercase() != "false")
1406            .unwrap_or(false)
1407        {
1408            log::debug!("EDGEFIRST_DISABLE_CPU is set");
1409            None
1410        } else {
1411            Some(CPUProcessor::new())
1412        };
1413        Ok(Self {
1414            cpu,
1415            #[cfg(target_os = "linux")]
1416            g2d,
1417            #[cfg(target_os = "linux")]
1418            #[cfg(feature = "opengl")]
1419            opengl,
1420        })
1421    }
1422
1423    /// Sets the interpolation mode for int8 proto textures on the OpenGL
1424    /// backend. No-op if OpenGL is not available.
1425    #[cfg(target_os = "linux")]
1426    #[cfg(feature = "opengl")]
1427    pub fn set_int8_interpolation_mode(&mut self, mode: Int8InterpolationMode) -> Result<()> {
1428        if let Some(ref mut gl) = self.opengl {
1429            gl.set_int8_interpolation_mode(mode)?;
1430        }
1431        Ok(())
1432    }
1433
1434    /// Create a `TensorImage` with the best available memory backend.
1435    ///
1436    /// Priority: DMA-buf → PBO → system memory.
1437    ///
1438    /// # Arguments
1439    ///
1440    /// * `width` - Image width in pixels
1441    /// * `height` - Image height in pixels
1442    /// * `fourcc` - Pixel format as a FourCC code
1443    ///
1444    /// # Returns
1445    ///
1446    /// A `TensorImage` backed by the highest-performance memory type
1447    /// available on this system.
1448    ///
1449    /// # Errors
1450    ///
1451    /// Returns an error if all allocation strategies fail.
1452    pub fn create_image(
1453        &self,
1454        width: usize,
1455        height: usize,
1456        fourcc: four_char_code::FourCharCode,
1457    ) -> Result<TensorImage> {
1458        // Try DMA first on Linux — skip only when GL has explicitly selected PBO
1459        // as the preferred transfer path (PBO is better than DMA in that case).
1460        #[cfg(target_os = "linux")]
1461        {
1462            #[cfg(feature = "opengl")]
1463            let gl_uses_pbo = self
1464                .opengl
1465                .as_ref()
1466                .is_some_and(|gl| gl.transfer_backend() == opengl_headless::TransferBackend::Pbo);
1467            #[cfg(not(feature = "opengl"))]
1468            let gl_uses_pbo = false;
1469
1470            if !gl_uses_pbo {
1471                if let Ok(img) = TensorImage::new(
1472                    width,
1473                    height,
1474                    fourcc,
1475                    Some(edgefirst_tensor::TensorMemory::Dma),
1476                ) {
1477                    return Ok(img);
1478                }
1479            }
1480        }
1481
1482        // Try PBO (if GL available)
1483        #[cfg(target_os = "linux")]
1484        #[cfg(feature = "opengl")]
1485        if let Some(gl) = &self.opengl {
1486            match gl.create_pbo_image(width, height, fourcc) {
1487                Ok(img) => return Ok(img),
1488                Err(e) => log::debug!("PBO image creation failed, falling back to Mem: {e:?}"),
1489            }
1490        }
1491
1492        // Fallback to Mem
1493        TensorImage::new(
1494            width,
1495            height,
1496            fourcc,
1497            Some(edgefirst_tensor::TensorMemory::Mem),
1498        )
1499    }
1500}
1501
1502impl ImageProcessorTrait for ImageProcessor {
    /// Converts the source image to the destination image format and size. The
    /// image is cropped first, then flipped, then rotated.
    ///
    /// Prefers hardware accelerators when available, falling back to CPU if
    /// necessary: G2D first, then (for same-size conversions) the CPU, then
    /// OpenGL, then the CPU as the last resort.
    fn convert(
        &mut self,
        src: &TensorImage,
        dst: &mut TensorImage,
        rotation: Rotation,
        flip: Flip,
        crop: Crop,
    ) -> Result<()> {
        let start = Instant::now();

        #[cfg(target_os = "linux")]
        if let Some(g2d) = self.g2d.as_mut() {
            log::trace!("image started with g2d in {:?}", start.elapsed());
            match g2d.convert(src, dst, rotation, flip, crop) {
                Ok(_) => {
                    log::trace!("image converted with g2d in {:?}", start.elapsed());
                    return Ok(());
                }
                Err(e) => {
                    log::trace!("image didn't convert with g2d: {e:?}")
                }
            }
        }

        // If the conversion is a plain copy without any resizing, send it to
        // the CPU and skip OpenGL.
        let src_shape = match crop.src_rect {
            Some(s) => (s.width, s.height),
            None => (src.width(), src.height()),
        };
        let dst_shape = match crop.dst_rect {
            Some(d) => (d.width, d.height),
            None => (dst.width(), dst.height()),
        };

        // TODO: Check whether the CPU should still be preferred when rotation
        // or flip is enabled.
        // NOTE(review): when this shortcut is taken, a CPU failure is returned
        // immediately without trying OpenGL — confirm that is intended.
        if src_shape == dst_shape && flip == Flip::None && rotation == Rotation::None {
            if let Some(cpu) = self.cpu.as_mut() {
                match cpu.convert(src, dst, rotation, flip, crop) {
                    Ok(_) => {
                        log::trace!("image converted with cpu in {:?}", start.elapsed());
                        return Ok(());
                    }
                    Err(e) => {
                        log::trace!("image didn't convert with cpu: {e:?}");
                        return Err(e);
                    }
                }
            }
        }

        #[cfg(target_os = "linux")]
        #[cfg(feature = "opengl")]
        if let Some(opengl) = self.opengl.as_mut() {
            log::trace!("image started with opengl in {:?}", start.elapsed());
            match opengl.convert(src, dst, rotation, flip, crop) {
                Ok(_) => {
                    log::trace!("image converted with opengl in {:?}", start.elapsed());
                    return Ok(());
                }
                Err(e) => {
                    log::trace!("image didn't convert with opengl: {e:?}")
                }
            }
        }
        log::trace!("image started with cpu in {:?}", start.elapsed());
        if let Some(cpu) = self.cpu.as_mut() {
            match cpu.convert(src, dst, rotation, flip, crop) {
                Ok(_) => {
                    log::trace!("image converted with cpu in {:?}", start.elapsed());
                    return Ok(());
                }
                Err(e) => {
                    log::trace!("image didn't convert with cpu: {e:?}");
                    return Err(e);
                }
            }
        }
        Err(Error::NoConverter)
    }
1588
1589    fn convert_ref(
1590        &mut self,
1591        src: &TensorImage,
1592        dst: &mut TensorImageRef<'_>,
1593        rotation: Rotation,
1594        flip: Flip,
1595        crop: Crop,
1596    ) -> Result<()> {
1597        let start = Instant::now();
1598
1599        // For TensorImageRef, we prefer CPU since hardware accelerators typically
1600        // don't support PLANAR_RGB output which is the common model input format.
1601        // The CPU path uses the generic conversion functions that work with any
1602        // TensorImageDst implementation.
1603        if let Some(cpu) = self.cpu.as_mut() {
1604            match cpu.convert_ref(src, dst, rotation, flip, crop) {
1605                Ok(_) => {
1606                    log::trace!("image converted with cpu (ref) in {:?}", start.elapsed());
1607                    return Ok(());
1608                }
1609                Err(e) => {
1610                    log::trace!("image didn't convert with cpu (ref): {e:?}");
1611                    return Err(e);
1612                }
1613            }
1614        }
1615
1616        Err(Error::NoConverter)
1617    }
1618
1619    fn draw_masks(
1620        &mut self,
1621        dst: &mut TensorImage,
1622        detect: &[DetectBox],
1623        segmentation: &[Segmentation],
1624    ) -> Result<()> {
1625        let start = Instant::now();
1626
1627        if detect.is_empty() && segmentation.is_empty() {
1628            return Ok(());
1629        }
1630
1631        // skip G2D as it doesn't support rendering to image
1632
1633        #[cfg(target_os = "linux")]
1634        #[cfg(feature = "opengl")]
1635        if let Some(opengl) = self.opengl.as_mut() {
1636            log::trace!("draw_masks started with opengl in {:?}", start.elapsed());
1637            match opengl.draw_masks(dst, detect, segmentation) {
1638                Ok(_) => {
1639                    log::trace!("draw_masks with opengl in {:?}", start.elapsed());
1640                    return Ok(());
1641                }
1642                Err(e) => {
1643                    log::trace!("draw_masks didn't work with opengl: {e:?}")
1644                }
1645            }
1646        }
1647        log::trace!("draw_masks started with cpu in {:?}", start.elapsed());
1648        if let Some(cpu) = self.cpu.as_mut() {
1649            match cpu.draw_masks(dst, detect, segmentation) {
1650                Ok(_) => {
1651                    log::trace!("draw_masks with cpu in {:?}", start.elapsed());
1652                    return Ok(());
1653                }
1654                Err(e) => {
1655                    log::trace!("draw_masks didn't work with cpu: {e:?}");
1656                    return Err(e);
1657                }
1658            }
1659        }
1660        Err(Error::NoConverter)
1661    }
1662
    /// Renders masks decoded from proto coefficients onto `dst`, preferring
    /// the OpenGL backend and falling back to the CPU.
    fn draw_masks_proto(
        &mut self,
        dst: &mut TensorImage,
        detect: &[DetectBox],
        proto_data: &ProtoData,
    ) -> Result<()> {
        let start = Instant::now();

        // Nothing to render.
        if detect.is_empty() {
            return Ok(());
        }

        // skip G2D as it doesn't support rendering to image

        #[cfg(target_os = "linux")]
        #[cfg(feature = "opengl")]
        if let Some(opengl) = self.opengl.as_mut() {
            log::trace!(
                "draw_masks_proto started with opengl in {:?}",
                start.elapsed()
            );
            match opengl.draw_masks_proto(dst, detect, proto_data) {
                Ok(_) => {
                    log::trace!("draw_masks_proto with opengl in {:?}", start.elapsed());
                    return Ok(());
                }
                Err(e) => {
                    log::trace!("draw_masks_proto didn't work with opengl: {e:?}")
                }
            }
        }
        log::trace!("draw_masks_proto started with cpu in {:?}", start.elapsed());
        if let Some(cpu) = self.cpu.as_mut() {
            match cpu.draw_masks_proto(dst, detect, proto_data) {
                Ok(_) => {
                    log::trace!("draw_masks_proto with cpu in {:?}", start.elapsed());
                    return Ok(());
                }
                Err(e) => {
                    log::trace!("draw_masks_proto didn't work with cpu: {e:?}");
                    return Err(e);
                }
            }
        }
        Err(Error::NoConverter)
    }
1709
1710    fn set_class_colors(&mut self, colors: &[[u8; 4]]) -> Result<()> {
1711        let start = Instant::now();
1712
1713        // skip G2D as it doesn't support rendering to image
1714
1715        #[cfg(target_os = "linux")]
1716        #[cfg(feature = "opengl")]
1717        if let Some(opengl) = self.opengl.as_mut() {
1718            log::trace!("image started with opengl in {:?}", start.elapsed());
1719            match opengl.set_class_colors(colors) {
1720                Ok(_) => {
1721                    log::trace!("colors set with opengl in {:?}", start.elapsed());
1722                    return Ok(());
1723                }
1724                Err(e) => {
1725                    log::trace!("colors didn't set with opengl: {e:?}")
1726                }
1727            }
1728        }
1729        log::trace!("image started with cpu in {:?}", start.elapsed());
1730        if let Some(cpu) = self.cpu.as_mut() {
1731            match cpu.set_class_colors(colors) {
1732                Ok(_) => {
1733                    log::trace!("colors set with cpu in {:?}", start.elapsed());
1734                    return Ok(());
1735                }
1736                Err(e) => {
1737                    log::trace!("colors didn't set with cpu: {e:?}");
1738                    return Err(e);
1739                }
1740            }
1741        }
1742        Err(Error::NoConverter)
1743    }
1744
1745    fn decode_masks_atlas(
1746        &mut self,
1747        detect: &[DetectBox],
1748        proto_data: ProtoData,
1749        output_width: usize,
1750        output_height: usize,
1751    ) -> Result<(Vec<u8>, Vec<MaskRegion>)> {
1752        if detect.is_empty() {
1753            return Ok((Vec::new(), Vec::new()));
1754        }
1755
1756        #[cfg(target_os = "linux")]
1757        #[cfg(feature = "opengl")]
1758        {
1759            let has_opengl = self.opengl.is_some();
1760            if has_opengl {
1761                let opengl = self.opengl.as_mut().unwrap();
1762                match opengl.decode_masks_atlas(detect, proto_data, output_width, output_height) {
1763                    Ok(r) => return Ok(r),
1764                    Err(e) => {
1765                        log::trace!("decode_masks_atlas didn't work with opengl: {e:?}");
1766                        return Err(e);
1767                    }
1768                }
1769            }
1770        }
1771        // CPU fallback: render per-detection masks and pack into compact atlas
1772        if let Some(cpu) = self.cpu.as_mut() {
1773            return cpu.decode_masks_atlas(detect, proto_data, output_width, output_height);
1774        }
1775        Err(Error::NoConverter)
1776    }
1777}
1778
1779fn fourcc_channels(fourcc: FourCharCode) -> Result<usize> {
1780    match fourcc {
1781        RGBA => Ok(4),        // RGBA has 4 channels (R, G, B, A)
1782        RGB => Ok(3),         // RGB has 3 channels (R, G, B)
1783        YUYV | VYUY => Ok(2), // YUYV/VYUY has 2 channels (Y and UV)
1784        GREY => Ok(1),        // Y800 has 1 channel (Y)
1785        NV12 => Ok(2),        // NV12 has 2 channel. 2nd channel is half empty
1786        NV16 => Ok(2),        // NV16 has 2 channel. 2nd channel is full size
1787        PLANAR_RGB => Ok(3),
1788        PLANAR_RGBA => Ok(4),
1789        RGB_INT8 => Ok(3),
1790        PLANAR_RGB_INT8 => Ok(3),
1791        _ => Err(Error::NotSupported(format!(
1792            "Unsupported fourcc: {}",
1793            fourcc.to_string()
1794        ))),
1795    }
1796}
1797
1798fn fourcc_planar(fourcc: FourCharCode) -> Result<bool> {
1799    match fourcc {
1800        RGBA => Ok(false),        // RGBA has 4 channels (R, G, B, A)
1801        RGB => Ok(false),         // RGB has 3 channels (R, G, B)
1802        YUYV | VYUY => Ok(false), // YUYV/VYUY has 2 channels (Y and UV)
1803        GREY => Ok(false),        // Y800 has 1 channel (Y)
1804        NV12 => Ok(true),         // Planar YUV
1805        NV16 => Ok(true),         // Planar YUV
1806        PLANAR_RGB => Ok(true),   // Planar RGB
1807        PLANAR_RGBA => Ok(true),  // Planar RGBA
1808        RGB_INT8 => Ok(false),
1809        PLANAR_RGB_INT8 => Ok(true),
1810        _ => Err(Error::NotSupported(format!(
1811            "Unsupported fourcc: {}",
1812            fourcc.to_string()
1813        ))),
1814    }
1815}
1816
1817/// Returns `true` if the format uses XOR 0x80 int8 reinterpretation.
1818pub(crate) fn fourcc_is_int8(fourcc: FourCharCode) -> bool {
1819    matches!(fourcc, RGB_INT8 | PLANAR_RGB_INT8)
1820}
1821
1822/// Returns the uint8 equivalent of an int8 format, or the format unchanged.
1823#[allow(dead_code)] // Will be used by Task 5 (non-DMA int8 path)
1824pub(crate) fn fourcc_uint8_equivalent(fourcc: FourCharCode) -> FourCharCode {
1825    match fourcc {
1826        RGB_INT8 => RGB,
1827        PLANAR_RGB_INT8 => PLANAR_RGB,
1828        other => other,
1829    }
1830}
1831
1832/// Returns `true` if the format is packed RGB (3 bytes per pixel, interleaved).
1833#[cfg_attr(not(target_os = "linux"), allow(dead_code))]
1834pub(crate) fn fourcc_is_packed_rgb(fourcc: FourCharCode) -> bool {
1835    matches!(fourcc, RGB | RGB_INT8)
1836}
1837
/// Scope timer: records its creation time and logs the elapsed duration at
/// trace level when dropped (see the `Drop` impl below).
///
/// The `Display` bound must stay on the struct itself: the `Drop` impl needs
/// it, and `Drop` impls are required to repeat the struct's bounds exactly.
pub(crate) struct FunctionTimer<T: Display> {
    name: T,                   // label included in the trace message
    start: std::time::Instant, // captured in `new`
}
1842
1843impl<T: Display> FunctionTimer<T> {
1844    pub fn new(name: T) -> Self {
1845        Self {
1846            name,
1847            start: std::time::Instant::now(),
1848        }
1849    }
1850}
1851
impl<T: Display> Drop for FunctionTimer<T> {
    fn drop(&mut self) {
        // Emit the label and total elapsed time when the timer leaves scope.
        log::trace!("{} elapsed: {:?}", self.name, self.start.elapsed())
    }
}
1857
/// Default class-overlay palette: 20 RGBA colors with components normalized
/// to [0, 1] and a fixed alpha of 0.7.
const DEFAULT_COLORS: [[f32; 4]; 20] = [
    [0., 1., 0., 0.7],
    [1., 0.5568628, 0., 0.7],
    [0.25882353, 0.15294118, 0.13333333, 0.7],
    [0.8, 0.7647059, 0.78039216, 0.7],
    [0.3137255, 0.3137255, 0.3137255, 0.7],
    [0.1411765, 0.3098039, 0.1215686, 0.7],
    [1., 0.95686275, 0.5137255, 0.7],
    [0.3529412, 0.32156863, 0., 0.7],
    [0.4235294, 0.6235294, 0.6509804, 0.7],
    [0.5098039, 0.5098039, 0.7294118, 0.7],
    [0.00784314, 0.18823529, 0.29411765, 0.7],
    [0.0, 0.2706, 1.0, 0.7],
    [0.0, 0.0, 0.0, 0.7],
    [0.0, 0.5, 0.0, 0.7],
    [1.0, 0.0, 0.0, 0.7],
    [0.0, 0.0, 1.0, 0.7],
    [1.0, 0.5, 0.5, 0.7],
    [0.1333, 0.5451, 0.1333, 0.7],
    [0.1176, 0.4118, 0.8235, 0.7],
    [1., 1., 1., 0.7],
];
1880
1881const fn denorm<const M: usize, const N: usize>(a: [[f32; M]; N]) -> [[u8; M]; N] {
1882    let mut result = [[0; M]; N];
1883    let mut i = 0;
1884    while i < N {
1885        let mut j = 0;
1886        while j < M {
1887            result[i][j] = (a[i][j] * 255.0).round() as u8;
1888            j += 1;
1889        }
1890        i += 1;
1891    }
1892    result
1893}
1894
/// `DEFAULT_COLORS` scaled from normalized floats to 0-255 bytes at compile time.
const DEFAULT_COLORS_U8: [[u8; 4]; 20] = denorm(DEFAULT_COLORS);
1896
1897#[cfg(test)]
1898#[cfg_attr(coverage_nightly, coverage(off))]
1899mod image_tests {
1900    use super::*;
1901    use crate::{CPUProcessor, Rotation};
1902    #[cfg(target_os = "linux")]
1903    use edgefirst_tensor::is_dma_available;
1904    use edgefirst_tensor::{TensorMapTrait, TensorMemory};
1905    use image::buffer::ConvertBuffer;
1906
    /// Runs once before the tests (via `ctor`), installing an env_logger
    /// that defaults to the `info` filter unless overridden by `RUST_LOG`.
    #[ctor::ctor]
    fn init() {
        env_logger::Builder::from_env(env_logger::Env::default().default_filter_or("info")).init();
    }
1911
    /// Expands to the name of the enclosing function as a `&'static str`,
    /// obtained by taking `type_name` of a local item (which ends in `::f`)
    /// and trimming the trailing `::f` plus any leading module path.
    macro_rules! function {
        () => {{
            fn f() {}
            fn type_name_of<T>(_: T) -> &'static str {
                std::any::type_name::<T>()
            }
            let name = type_name_of(f);

            // Drop the trailing "::f" (3 chars), then cut everything up to
            // and including the last ':' of the remaining path.
            match &name[..name.len() - 3].rfind(':') {
                Some(pos) => &name[pos + 1..name.len() - 3],
                None => &name[..name.len() - 3],
            }
        }};
    }
1927
1928    #[test]
1929    fn test_invalid_crop() {
1930        let src = TensorImage::new(100, 100, RGB, None).unwrap();
1931        let dst = TensorImage::new(100, 100, RGB, None).unwrap();
1932
1933        let crop = Crop::new()
1934            .with_src_rect(Some(Rect::new(50, 50, 60, 60)))
1935            .with_dst_rect(Some(Rect::new(0, 0, 150, 150)));
1936
1937        let result = crop.check_crop(&src, &dst);
1938        assert!(matches!(
1939            result,
1940            Err(Error::CropInvalid(e)) if e.starts_with("Dest and Src crop invalid")
1941        ));
1942
1943        let crop = crop.with_src_rect(Some(Rect::new(0, 0, 10, 10)));
1944        let result = crop.check_crop(&src, &dst);
1945        assert!(matches!(
1946            result,
1947            Err(Error::CropInvalid(e)) if e.starts_with("Dest crop invalid")
1948        ));
1949
1950        let crop = crop
1951            .with_src_rect(Some(Rect::new(50, 50, 60, 60)))
1952            .with_dst_rect(Some(Rect::new(0, 0, 50, 50)));
1953        let result = crop.check_crop(&src, &dst);
1954        assert!(matches!(
1955            result,
1956            Err(Error::CropInvalid(e)) if e.starts_with("Src crop invalid")
1957        ));
1958
1959        let crop = crop.with_src_rect(Some(Rect::new(50, 50, 50, 50)));
1960
1961        let result = crop.check_crop(&src, &dst);
1962        assert!(result.is_ok());
1963    }
1964
1965    #[test]
1966    fn test_invalid_tensor() -> Result<(), Error> {
1967        let tensor = Tensor::new(&[720, 1280, 4, 1], None, None)?;
1968        let result = TensorImage::from_tensor(tensor, RGB);
1969        assert!(matches!(
1970            result,
1971            Err(Error::InvalidShape(e)) if e.starts_with("Tensor shape must have 3 dimensions, got")
1972        ));
1973
1974        let tensor = Tensor::new(&[720, 1280, 4], None, None)?;
1975        let result = TensorImage::from_tensor(tensor, RGB);
1976        assert!(matches!(
1977            result,
1978            Err(Error::InvalidShape(e)) if e.starts_with("Invalid tensor shape")
1979        ));
1980
1981        Ok(())
1982    }
1983
1984    #[test]
1985    fn test_invalid_image_file() -> Result<(), Error> {
1986        let result = TensorImage::load(&[123; 5000], None, None);
1987        assert!(matches!(
1988            result,
1989            Err(Error::NotSupported(e)) if e == "Could not decode as jpeg or png"));
1990
1991        Ok(())
1992    }
1993
1994    #[test]
1995    fn test_invalid_jpeg_fourcc() -> Result<(), Error> {
1996        let result = TensorImage::load(&[123; 5000], Some(YUYV), None);
1997        assert!(matches!(
1998            result,
1999            Err(Error::NotSupported(e)) if e == "Could not decode as jpeg or png"));
2000
2001        Ok(())
2002    }
2003
2004    #[test]
2005    fn test_load_resize_save() {
2006        let file = include_bytes!("../../../testdata/zidane.jpg");
2007        let img = TensorImage::load_jpeg(file, Some(RGBA), None).unwrap();
2008        assert_eq!(img.width(), 1280);
2009        assert_eq!(img.height(), 720);
2010
2011        let mut dst = TensorImage::new(640, 360, RGBA, None).unwrap();
2012        let mut converter = CPUProcessor::new();
2013        converter
2014            .convert(&img, &mut dst, Rotation::None, Flip::None, Crop::no_crop())
2015            .unwrap();
2016        assert_eq!(dst.width(), 640);
2017        assert_eq!(dst.height(), 360);
2018
2019        dst.save_jpeg("zidane_resized.jpg", 80).unwrap();
2020
2021        let file = std::fs::read("zidane_resized.jpg").unwrap();
2022        let img = TensorImage::load_jpeg(&file, None, None).unwrap();
2023        assert_eq!(img.width(), 640);
2024        assert_eq!(img.height(), 360);
2025        assert_eq!(img.fourcc(), RGB);
2026    }
2027
2028    #[test]
2029    fn test_from_tensor_planar() -> Result<(), Error> {
2030        let tensor = Tensor::new(&[3, 720, 1280], None, None)?;
2031        tensor
2032            .map()?
2033            .copy_from_slice(include_bytes!("../../../testdata/camera720p.8bps"));
2034        let planar = TensorImage::from_tensor(tensor, PLANAR_RGB)?;
2035
2036        let rbga = load_bytes_to_tensor(
2037            1280,
2038            720,
2039            RGBA,
2040            None,
2041            include_bytes!("../../../testdata/camera720p.rgba"),
2042        )?;
2043        compare_images_convert_to_rgb(&planar, &rbga, 0.98, function!());
2044
2045        Ok(())
2046    }
2047
2048    #[test]
2049    fn test_from_tensor_invalid_fourcc() {
2050        let tensor = Tensor::new(&[3, 720, 1280], None, None).unwrap();
2051        let result = TensorImage::from_tensor(tensor, four_char_code!("TEST"));
2052        matches!(result, Err(Error::NotSupported(e)) if e.starts_with("Unsupported fourcc : TEST"));
2053    }
2054
2055    #[test]
2056    #[should_panic(expected = "Failed to save planar RGB image")]
2057    fn test_save_planar() {
2058        let planar_img = load_bytes_to_tensor(
2059            1280,
2060            720,
2061            PLANAR_RGB,
2062            None,
2063            include_bytes!("../../../testdata/camera720p.8bps"),
2064        )
2065        .unwrap();
2066
2067        let save_path = "/tmp/planar_rgb.jpg";
2068        planar_img
2069            .save_jpeg(save_path, 90)
2070            .expect("Failed to save planar RGB image");
2071    }
2072
2073    #[test]
2074    #[should_panic(expected = "Failed to save YUYV image")]
2075    fn test_save_yuyv() {
2076        let planar_img = load_bytes_to_tensor(
2077            1280,
2078            720,
2079            YUYV,
2080            None,
2081            include_bytes!("../../../testdata/camera720p.yuyv"),
2082        )
2083        .unwrap();
2084
2085        let save_path = "/tmp/yuyv.jpg";
2086        planar_img
2087            .save_jpeg(save_path, 90)
2088            .expect("Failed to save YUYV image");
2089    }
2090
2091    #[test]
2092    fn test_rotation_angle() {
2093        assert_eq!(Rotation::from_degrees_clockwise(0), Rotation::None);
2094        assert_eq!(Rotation::from_degrees_clockwise(90), Rotation::Clockwise90);
2095        assert_eq!(Rotation::from_degrees_clockwise(180), Rotation::Rotate180);
2096        assert_eq!(
2097            Rotation::from_degrees_clockwise(270),
2098            Rotation::CounterClockwise90
2099        );
2100        assert_eq!(Rotation::from_degrees_clockwise(360), Rotation::None);
2101        assert_eq!(Rotation::from_degrees_clockwise(450), Rotation::Clockwise90);
2102        assert_eq!(Rotation::from_degrees_clockwise(540), Rotation::Rotate180);
2103        assert_eq!(
2104            Rotation::from_degrees_clockwise(630),
2105            Rotation::CounterClockwise90
2106        );
2107    }
2108
    /// Angles that are not a multiple of 90 degrees must panic.
    #[test]
    #[should_panic(expected = "rotation angle is not a multiple of 90")]
    fn test_rotation_angle_panic() {
        Rotation::from_degrees_clockwise(361);
    }
2114
    /// Each `EDGEFIRST_DISABLE_*` variable must suppress its backend, and
    /// with every backend disabled `convert` must return `NoConverter`.
    ///
    /// NOTE(review): environment variables are process-global; each section
    /// saves and restores the variables it touches, but interleaving with
    /// other env-reading tests could still matter — statement order here is
    /// deliberate.
    #[test]
    fn test_disable_env_var() -> Result<(), Error> {
        // Disabling G2D must leave the processor without a G2D backend.
        #[cfg(target_os = "linux")]
        {
            let original = std::env::var("EDGEFIRST_DISABLE_G2D").ok();
            unsafe { std::env::set_var("EDGEFIRST_DISABLE_G2D", "1") };
            let converter = ImageProcessor::new()?;
            match original {
                Some(s) => unsafe { std::env::set_var("EDGEFIRST_DISABLE_G2D", s) },
                None => unsafe { std::env::remove_var("EDGEFIRST_DISABLE_G2D") },
            }
            assert!(converter.g2d.is_none());
        }

        // Disabling OpenGL must leave the processor without a GL backend.
        #[cfg(target_os = "linux")]
        #[cfg(feature = "opengl")]
        {
            let original = std::env::var("EDGEFIRST_DISABLE_GL").ok();
            unsafe { std::env::set_var("EDGEFIRST_DISABLE_GL", "1") };
            let converter = ImageProcessor::new()?;
            match original {
                Some(s) => unsafe { std::env::set_var("EDGEFIRST_DISABLE_GL", s) },
                None => unsafe { std::env::remove_var("EDGEFIRST_DISABLE_GL") },
            }
            assert!(converter.opengl.is_none());
        }

        // Disabling the CPU backend must leave the processor without one.
        let original = std::env::var("EDGEFIRST_DISABLE_CPU").ok();
        unsafe { std::env::set_var("EDGEFIRST_DISABLE_CPU", "1") };
        let converter = ImageProcessor::new()?;
        match original {
            Some(s) => unsafe { std::env::set_var("EDGEFIRST_DISABLE_CPU", s) },
            None => unsafe { std::env::remove_var("EDGEFIRST_DISABLE_CPU") },
        }
        assert!(converter.cpu.is_none());

        // With all three backends disabled, conversion must fail with
        // NoConverter.
        let original_cpu = std::env::var("EDGEFIRST_DISABLE_CPU").ok();
        unsafe { std::env::set_var("EDGEFIRST_DISABLE_CPU", "1") };
        let original_gl = std::env::var("EDGEFIRST_DISABLE_GL").ok();
        unsafe { std::env::set_var("EDGEFIRST_DISABLE_GL", "1") };
        let original_g2d = std::env::var("EDGEFIRST_DISABLE_G2D").ok();
        unsafe { std::env::set_var("EDGEFIRST_DISABLE_G2D", "1") };
        let mut converter = ImageProcessor::new()?;

        let src = TensorImage::new(1280, 720, RGBA, None)?;
        let mut dst = TensorImage::new(640, 360, RGBA, None)?;
        let result = converter.convert(&src, &mut dst, Rotation::None, Flip::None, Crop::no_crop());
        assert!(matches!(result, Err(Error::NoConverter)));

        // Restore all three variables to their original state.
        match original_cpu {
            Some(s) => unsafe { std::env::set_var("EDGEFIRST_DISABLE_CPU", s) },
            None => unsafe { std::env::remove_var("EDGEFIRST_DISABLE_CPU") },
        }
        match original_gl {
            Some(s) => unsafe { std::env::set_var("EDGEFIRST_DISABLE_GL", s) },
            None => unsafe { std::env::remove_var("EDGEFIRST_DISABLE_GL") },
        }
        match original_g2d {
            Some(s) => unsafe { std::env::set_var("EDGEFIRST_DISABLE_G2D", s) },
            None => unsafe { std::env::remove_var("EDGEFIRST_DISABLE_G2D") },
        }

        Ok(())
    }
2179
2180    #[test]
2181    fn test_unsupported_conversion() {
2182        let src = TensorImage::new(1280, 720, NV12, None).unwrap();
2183        let mut dst = TensorImage::new(640, 360, NV12, None).unwrap();
2184        let mut converter = ImageProcessor::new().unwrap();
2185        let result = converter.convert(&src, &mut dst, Rotation::None, Flip::None, Crop::no_crop());
2186        log::debug!("result: {:?}", result);
2187        assert!(matches!(
2188            result,
2189            Err(Error::NotSupported(e)) if e.starts_with("Conversion from NV12 to NV12")
2190        ));
2191    }
2192
2193    #[test]
2194    fn test_load_grey() {
2195        let grey_img = TensorImage::load_jpeg(
2196            include_bytes!("../../../testdata/grey.jpg"),
2197            Some(RGBA),
2198            None,
2199        )
2200        .unwrap();
2201
2202        let grey_but_rgb_img = TensorImage::load_jpeg(
2203            include_bytes!("../../../testdata/grey-rgb.jpg"),
2204            Some(RGBA),
2205            None,
2206        )
2207        .unwrap();
2208
2209        compare_images(&grey_img, &grey_but_rgb_img, 0.99, function!());
2210    }
2211
2212    #[test]
2213    fn test_new_nv12() {
2214        let nv12 = TensorImage::new(1280, 720, NV12, None).unwrap();
2215        assert_eq!(nv12.height(), 720);
2216        assert_eq!(nv12.width(), 1280);
2217        assert_eq!(nv12.fourcc(), NV12);
2218        assert_eq!(nv12.channels(), 2);
2219        assert!(nv12.is_planar())
2220    }
2221
2222    #[test]
2223    #[cfg(target_os = "linux")]
2224    fn test_new_image_converter() {
2225        let dst_width = 640;
2226        let dst_height = 360;
2227        let file = include_bytes!("../../../testdata/zidane.jpg").to_vec();
2228        let src = TensorImage::load_jpeg(&file, Some(RGBA), None).unwrap();
2229
2230        let mut converter_dst = TensorImage::new(dst_width, dst_height, RGBA, None).unwrap();
2231        let mut converter = ImageProcessor::new().unwrap();
2232        converter
2233            .convert(
2234                &src,
2235                &mut converter_dst,
2236                Rotation::None,
2237                Flip::None,
2238                Crop::no_crop(),
2239            )
2240            .unwrap();
2241
2242        let mut cpu_dst = TensorImage::new(dst_width, dst_height, RGBA, None).unwrap();
2243        let mut cpu_converter = CPUProcessor::new();
2244        cpu_converter
2245            .convert(
2246                &src,
2247                &mut cpu_dst,
2248                Rotation::None,
2249                Flip::None,
2250                Crop::no_crop(),
2251            )
2252            .unwrap();
2253
2254        compare_images(&converter_dst, &cpu_dst, 0.98, function!());
2255    }
2256
2257    #[test]
2258    fn test_crop_skip() {
2259        let file = include_bytes!("../../../testdata/zidane.jpg").to_vec();
2260        let src = TensorImage::load_jpeg(&file, Some(RGBA), None).unwrap();
2261
2262        let mut converter_dst = TensorImage::new(1280, 720, RGBA, None).unwrap();
2263        let mut converter = ImageProcessor::new().unwrap();
2264        let crop = Crop::new()
2265            .with_src_rect(Some(Rect::new(0, 0, 640, 640)))
2266            .with_dst_rect(Some(Rect::new(0, 0, 640, 640)));
2267        converter
2268            .convert(&src, &mut converter_dst, Rotation::None, Flip::None, crop)
2269            .unwrap();
2270
2271        let mut cpu_dst = TensorImage::new(1280, 720, RGBA, None).unwrap();
2272        let mut cpu_converter = CPUProcessor::new();
2273        cpu_converter
2274            .convert(&src, &mut cpu_dst, Rotation::None, Flip::None, crop)
2275            .unwrap();
2276
2277        compare_images(&converter_dst, &cpu_dst, 0.99999, function!());
2278    }
2279
2280    #[test]
2281    fn test_invalid_fourcc() {
2282        let result = TensorImage::new(1280, 720, four_char_code!("TEST"), None);
2283        assert!(matches!(
2284            result,
2285            Err(Error::NotSupported(e)) if e == "Unsupported fourcc: TEST"
2286        ));
2287    }
2288
    // Caches whether a G2D processor could be constructed (Linux/i.MX8 only);
    // the probe runs at most once per test binary.
    #[cfg(target_os = "linux")]
    static G2D_AVAILABLE: std::sync::OnceLock<bool> = std::sync::OnceLock::new();

    // Helper function to check if G2D library is available (Linux/i.MX8 only)
    #[cfg(target_os = "linux")]
    fn is_g2d_available() -> bool {
        *G2D_AVAILABLE.get_or_init(|| G2DProcessor::new().is_ok())
    }
2297
    // Caches whether an OpenGL processor could be constructed; probed once.
    #[cfg(target_os = "linux")]
    #[cfg(feature = "opengl")]
    static GL_AVAILABLE: std::sync::OnceLock<bool> = std::sync::OnceLock::new();
2301
2302    #[cfg(target_os = "linux")]
2303    #[cfg(feature = "opengl")]
2304    // Helper function to check if OpenGL is available
2305    fn is_opengl_available() -> bool {
2306        #[cfg(all(target_os = "linux", feature = "opengl"))]
2307        {
2308            *GL_AVAILABLE.get_or_init(|| GLProcessorThreaded::new(None).is_ok())
2309        }
2310
2311        #[cfg(not(all(target_os = "linux", feature = "opengl")))]
2312        {
2313            false
2314        }
2315    }
2316
2317    #[test]
2318    fn test_load_jpeg_with_exif() {
2319        let file = include_bytes!("../../../testdata/zidane_rotated_exif.jpg").to_vec();
2320        let loaded = TensorImage::load_jpeg(&file, Some(RGBA), None).unwrap();
2321
2322        assert_eq!(loaded.height(), 1280);
2323        assert_eq!(loaded.width(), 720);
2324
2325        let file = include_bytes!("../../../testdata/zidane.jpg").to_vec();
2326        let cpu_src = TensorImage::load_jpeg(&file, Some(RGBA), None).unwrap();
2327
2328        let (dst_width, dst_height) = (cpu_src.height(), cpu_src.width());
2329
2330        let mut cpu_dst = TensorImage::new(dst_width, dst_height, RGBA, None).unwrap();
2331        let mut cpu_converter = CPUProcessor::new();
2332
2333        cpu_converter
2334            .convert(
2335                &cpu_src,
2336                &mut cpu_dst,
2337                Rotation::Clockwise90,
2338                Flip::None,
2339                Crop::no_crop(),
2340            )
2341            .unwrap();
2342
2343        compare_images(&loaded, &cpu_dst, 0.98, function!());
2344    }
2345
2346    #[test]
2347    fn test_load_png_with_exif() {
2348        let file = include_bytes!("../../../testdata/zidane_rotated_exif_180.png").to_vec();
2349        let loaded = TensorImage::load_png(&file, Some(RGBA), None).unwrap();
2350
2351        assert_eq!(loaded.height(), 720);
2352        assert_eq!(loaded.width(), 1280);
2353
2354        let file = include_bytes!("../../../testdata/zidane.jpg").to_vec();
2355        let cpu_src = TensorImage::load_jpeg(&file, Some(RGBA), None).unwrap();
2356
2357        let mut cpu_dst = TensorImage::new(1280, 720, RGBA, None).unwrap();
2358        let mut cpu_converter = CPUProcessor::new();
2359
2360        cpu_converter
2361            .convert(
2362                &cpu_src,
2363                &mut cpu_dst,
2364                Rotation::Rotate180,
2365                Flip::None,
2366                Crop::no_crop(),
2367            )
2368            .unwrap();
2369
2370        compare_images(&loaded, &cpu_dst, 0.98, function!());
2371    }
2372
2373    #[test]
2374    #[cfg(target_os = "linux")]
2375    fn test_g2d_resize() {
2376        if !is_g2d_available() {
2377            eprintln!("SKIPPED: test_g2d_resize - G2D library (libg2d.so.2) not available");
2378            return;
2379        }
2380        if !is_dma_available() {
2381            eprintln!(
2382                "SKIPPED: test_g2d_resize - DMA memory allocation not available (permission denied or no DMA-BUF support)"
2383            );
2384            return;
2385        }
2386
2387        let dst_width = 640;
2388        let dst_height = 360;
2389        let file = include_bytes!("../../../testdata/zidane.jpg").to_vec();
2390        let src = TensorImage::load_jpeg(&file, Some(RGBA), Some(TensorMemory::Dma)).unwrap();
2391
2392        let mut g2d_dst =
2393            TensorImage::new(dst_width, dst_height, RGBA, Some(TensorMemory::Dma)).unwrap();
2394        let mut g2d_converter = G2DProcessor::new().unwrap();
2395        g2d_converter
2396            .convert(
2397                &src,
2398                &mut g2d_dst,
2399                Rotation::None,
2400                Flip::None,
2401                Crop::no_crop(),
2402            )
2403            .unwrap();
2404
2405        let mut cpu_dst = TensorImage::new(dst_width, dst_height, RGBA, None).unwrap();
2406        let mut cpu_converter = CPUProcessor::new();
2407        cpu_converter
2408            .convert(
2409                &src,
2410                &mut cpu_dst,
2411                Rotation::None,
2412                Flip::None,
2413                Crop::no_crop(),
2414            )
2415            .unwrap();
2416
2417        compare_images(&g2d_dst, &cpu_dst, 0.98, function!());
2418    }
2419
2420    #[test]
2421    #[cfg(target_os = "linux")]
2422    #[cfg(feature = "opengl")]
2423    fn test_opengl_resize() {
2424        if !is_opengl_available() {
2425            eprintln!("SKIPPED: {} - OpenGL not available", function!());
2426            return;
2427        }
2428
2429        let dst_width = 640;
2430        let dst_height = 360;
2431        let file = include_bytes!("../../../testdata/zidane.jpg").to_vec();
2432        let src = TensorImage::load_jpeg(&file, Some(RGBA), None).unwrap();
2433
2434        let mut cpu_dst = TensorImage::new(dst_width, dst_height, RGBA, None).unwrap();
2435        let mut cpu_converter = CPUProcessor::new();
2436        cpu_converter
2437            .convert(
2438                &src,
2439                &mut cpu_dst,
2440                Rotation::None,
2441                Flip::None,
2442                Crop::no_crop(),
2443            )
2444            .unwrap();
2445        let mut gl_dst = TensorImage::new(dst_width, dst_height, RGBA, None).unwrap();
2446        let mut gl_converter = GLProcessorThreaded::new(None).unwrap();
2447
2448        for _ in 0..5 {
2449            gl_converter
2450                .convert(
2451                    &src,
2452                    &mut gl_dst,
2453                    Rotation::None,
2454                    Flip::None,
2455                    Crop::no_crop(),
2456                )
2457                .unwrap();
2458
2459            compare_images(&gl_dst, &cpu_dst, 0.98, function!());
2460        }
2461
2462        drop(gl_dst);
2463    }
2464
2465    #[test]
2466    #[ignore] // Vivante GPU hangs with concurrent EGL contexts on i.MX8MP
2467    #[cfg(target_os = "linux")]
2468    #[cfg(feature = "opengl")]
2469    fn test_opengl_10_threads() {
2470        if !is_opengl_available() {
2471            eprintln!("SKIPPED: {} - OpenGL not available", function!());
2472            return;
2473        }
2474
2475        let handles: Vec<_> = (0..10)
2476            .map(|i| {
2477                std::thread::Builder::new()
2478                    .name(format!("Thread {i}"))
2479                    .spawn(test_opengl_resize)
2480                    .unwrap()
2481            })
2482            .collect();
2483        handles.into_iter().for_each(|h| {
2484            if let Err(e) = h.join() {
2485                std::panic::resume_unwind(e)
2486            }
2487        });
2488    }
2489
2490    #[test]
2491    #[cfg(target_os = "linux")]
2492    #[cfg(feature = "opengl")]
2493    fn test_opengl_grey() {
2494        if !is_opengl_available() {
2495            eprintln!("SKIPPED: {} - OpenGL not available", function!());
2496            return;
2497        }
2498
2499        let img = TensorImage::load_jpeg(
2500            include_bytes!("../../../testdata/grey.jpg"),
2501            Some(GREY),
2502            None,
2503        )
2504        .unwrap();
2505
2506        let mut gl_dst = TensorImage::new(640, 640, GREY, None).unwrap();
2507        let mut cpu_dst = TensorImage::new(640, 640, GREY, None).unwrap();
2508
2509        let mut converter = CPUProcessor::new();
2510
2511        converter
2512            .convert(
2513                &img,
2514                &mut cpu_dst,
2515                Rotation::None,
2516                Flip::None,
2517                Crop::no_crop(),
2518            )
2519            .unwrap();
2520
2521        let mut gl = GLProcessorThreaded::new(None).unwrap();
2522        gl.convert(
2523            &img,
2524            &mut gl_dst,
2525            Rotation::None,
2526            Flip::None,
2527            Crop::no_crop(),
2528        )
2529        .unwrap();
2530
2531        compare_images(&gl_dst, &cpu_dst, 0.98, function!());
2532    }
2533
2534    #[test]
2535    #[cfg(target_os = "linux")]
2536    fn test_g2d_src_crop() {
2537        if !is_g2d_available() {
2538            eprintln!("SKIPPED: test_g2d_src_crop - G2D library (libg2d.so.2) not available");
2539            return;
2540        }
2541        if !is_dma_available() {
2542            eprintln!(
2543                "SKIPPED: test_g2d_src_crop - DMA memory allocation not available (permission denied or no DMA-BUF support)"
2544            );
2545            return;
2546        }
2547
2548        let dst_width = 640;
2549        let dst_height = 640;
2550        let file = include_bytes!("../../../testdata/zidane.jpg").to_vec();
2551        let src = TensorImage::load_jpeg(&file, Some(RGBA), None).unwrap();
2552
2553        let mut cpu_dst = TensorImage::new(dst_width, dst_height, RGBA, None).unwrap();
2554        let mut cpu_converter = CPUProcessor::new();
2555        cpu_converter
2556            .convert(
2557                &src,
2558                &mut cpu_dst,
2559                Rotation::None,
2560                Flip::None,
2561                Crop {
2562                    src_rect: Some(Rect {
2563                        left: 0,
2564                        top: 0,
2565                        width: 640,
2566                        height: 360,
2567                    }),
2568                    dst_rect: None,
2569                    dst_color: None,
2570                },
2571            )
2572            .unwrap();
2573
2574        let mut g2d_dst = TensorImage::new(dst_width, dst_height, RGBA, None).unwrap();
2575        let mut g2d_converter = G2DProcessor::new().unwrap();
2576        g2d_converter
2577            .convert(
2578                &src,
2579                &mut g2d_dst,
2580                Rotation::None,
2581                Flip::None,
2582                Crop {
2583                    src_rect: Some(Rect {
2584                        left: 0,
2585                        top: 0,
2586                        width: 640,
2587                        height: 360,
2588                    }),
2589                    dst_rect: None,
2590                    dst_color: None,
2591                },
2592            )
2593            .unwrap();
2594
2595        compare_images(&g2d_dst, &cpu_dst, 0.98, function!());
2596    }
2597
2598    #[test]
2599    #[cfg(target_os = "linux")]
2600    fn test_g2d_dst_crop() {
2601        if !is_g2d_available() {
2602            eprintln!("SKIPPED: test_g2d_dst_crop - G2D library (libg2d.so.2) not available");
2603            return;
2604        }
2605        if !is_dma_available() {
2606            eprintln!(
2607                "SKIPPED: test_g2d_dst_crop - DMA memory allocation not available (permission denied or no DMA-BUF support)"
2608            );
2609            return;
2610        }
2611
2612        let dst_width = 640;
2613        let dst_height = 640;
2614        let file = include_bytes!("../../../testdata/zidane.jpg").to_vec();
2615        let src = TensorImage::load_jpeg(&file, Some(RGBA), None).unwrap();
2616
2617        let mut cpu_dst = TensorImage::new(dst_width, dst_height, RGBA, None).unwrap();
2618        let mut cpu_converter = CPUProcessor::new();
2619        cpu_converter
2620            .convert(
2621                &src,
2622                &mut cpu_dst,
2623                Rotation::None,
2624                Flip::None,
2625                Crop {
2626                    src_rect: None,
2627                    dst_rect: Some(Rect::new(100, 100, 512, 288)),
2628                    dst_color: None,
2629                },
2630            )
2631            .unwrap();
2632
2633        let mut g2d_dst = TensorImage::new(dst_width, dst_height, RGBA, None).unwrap();
2634        let mut g2d_converter = G2DProcessor::new().unwrap();
2635        g2d_converter
2636            .convert(
2637                &src,
2638                &mut g2d_dst,
2639                Rotation::None,
2640                Flip::None,
2641                Crop {
2642                    src_rect: None,
2643                    dst_rect: Some(Rect::new(100, 100, 512, 288)),
2644                    dst_color: None,
2645                },
2646            )
2647            .unwrap();
2648
2649        compare_images(&g2d_dst, &cpu_dst, 0.98, function!());
2650    }
2651
2652    #[test]
2653    #[cfg(target_os = "linux")]
2654    fn test_g2d_all_rgba() {
2655        if !is_g2d_available() {
2656            eprintln!("SKIPPED: test_g2d_all_rgba - G2D library (libg2d.so.2) not available");
2657            return;
2658        }
2659        if !is_dma_available() {
2660            eprintln!(
2661                "SKIPPED: test_g2d_all_rgba - DMA memory allocation not available (permission denied or no DMA-BUF support)"
2662            );
2663            return;
2664        }
2665
2666        let dst_width = 640;
2667        let dst_height = 640;
2668        let file = include_bytes!("../../../testdata/zidane.jpg").to_vec();
2669        let src = TensorImage::load_jpeg(&file, Some(RGBA), None).unwrap();
2670
2671        let mut cpu_dst = TensorImage::new(dst_width, dst_height, RGBA, None).unwrap();
2672        let mut cpu_converter = CPUProcessor::new();
2673        let mut g2d_dst = TensorImage::new(dst_width, dst_height, RGBA, None).unwrap();
2674        let mut g2d_converter = G2DProcessor::new().unwrap();
2675
2676        for rot in [
2677            Rotation::None,
2678            Rotation::Clockwise90,
2679            Rotation::Rotate180,
2680            Rotation::CounterClockwise90,
2681        ] {
2682            cpu_dst.tensor.map().unwrap().as_mut_slice().fill(114);
2683            g2d_dst.tensor.map().unwrap().as_mut_slice().fill(114);
2684            for flip in [Flip::None, Flip::Horizontal, Flip::Vertical] {
2685                cpu_converter
2686                    .convert(
2687                        &src,
2688                        &mut cpu_dst,
2689                        Rotation::None,
2690                        Flip::None,
2691                        Crop {
2692                            src_rect: Some(Rect::new(50, 120, 1024, 576)),
2693                            dst_rect: Some(Rect::new(100, 100, 512, 288)),
2694                            dst_color: None,
2695                        },
2696                    )
2697                    .unwrap();
2698
2699                g2d_converter
2700                    .convert(
2701                        &src,
2702                        &mut g2d_dst,
2703                        Rotation::None,
2704                        Flip::None,
2705                        Crop {
2706                            src_rect: Some(Rect::new(50, 120, 1024, 576)),
2707                            dst_rect: Some(Rect::new(100, 100, 512, 288)),
2708                            dst_color: None,
2709                        },
2710                    )
2711                    .unwrap();
2712
2713                compare_images(
2714                    &g2d_dst,
2715                    &cpu_dst,
2716                    0.98,
2717                    &format!("{} {:?} {:?}", function!(), rot, flip),
2718                );
2719            }
2720        }
2721    }
2722
2723    #[test]
2724    #[cfg(target_os = "linux")]
2725    #[cfg(feature = "opengl")]
2726    fn test_opengl_src_crop() {
2727        if !is_opengl_available() {
2728            eprintln!("SKIPPED: {} - OpenGL not available", function!());
2729            return;
2730        }
2731
2732        let dst_width = 640;
2733        let dst_height = 360;
2734        let file = include_bytes!("../../../testdata/zidane.jpg").to_vec();
2735        let src = TensorImage::load_jpeg(&file, Some(RGBA), None).unwrap();
2736
2737        let mut cpu_dst = TensorImage::new(dst_width, dst_height, RGBA, None).unwrap();
2738        let mut cpu_converter = CPUProcessor::new();
2739        cpu_converter
2740            .convert(
2741                &src,
2742                &mut cpu_dst,
2743                Rotation::None,
2744                Flip::None,
2745                Crop {
2746                    src_rect: Some(Rect {
2747                        left: 320,
2748                        top: 180,
2749                        width: 1280 - 320,
2750                        height: 720 - 180,
2751                    }),
2752                    dst_rect: None,
2753                    dst_color: None,
2754                },
2755            )
2756            .unwrap();
2757
2758        let mut gl_dst = TensorImage::new(dst_width, dst_height, RGBA, None).unwrap();
2759        let mut gl_converter = GLProcessorThreaded::new(None).unwrap();
2760
2761        gl_converter
2762            .convert(
2763                &src,
2764                &mut gl_dst,
2765                Rotation::None,
2766                Flip::None,
2767                Crop {
2768                    src_rect: Some(Rect {
2769                        left: 320,
2770                        top: 180,
2771                        width: 1280 - 320,
2772                        height: 720 - 180,
2773                    }),
2774                    dst_rect: None,
2775                    dst_color: None,
2776                },
2777            )
2778            .unwrap();
2779
2780        compare_images(&gl_dst, &cpu_dst, 0.98, function!());
2781    }
2782
2783    #[test]
2784    #[cfg(target_os = "linux")]
2785    #[cfg(feature = "opengl")]
2786    fn test_opengl_dst_crop() {
2787        if !is_opengl_available() {
2788            eprintln!("SKIPPED: {} - OpenGL not available", function!());
2789            return;
2790        }
2791
2792        let dst_width = 640;
2793        let dst_height = 640;
2794        let file = include_bytes!("../../../testdata/zidane.jpg").to_vec();
2795        let src = TensorImage::load_jpeg(&file, Some(RGBA), None).unwrap();
2796
2797        let mut cpu_dst = TensorImage::new(dst_width, dst_height, RGBA, None).unwrap();
2798        let mut cpu_converter = CPUProcessor::new();
2799        cpu_converter
2800            .convert(
2801                &src,
2802                &mut cpu_dst,
2803                Rotation::None,
2804                Flip::None,
2805                Crop {
2806                    src_rect: None,
2807                    dst_rect: Some(Rect::new(100, 100, 512, 288)),
2808                    dst_color: None,
2809                },
2810            )
2811            .unwrap();
2812
2813        let mut gl_dst = TensorImage::new(dst_width, dst_height, RGBA, None).unwrap();
2814        let mut gl_converter = GLProcessorThreaded::new(None).unwrap();
2815        gl_converter
2816            .convert(
2817                &src,
2818                &mut gl_dst,
2819                Rotation::None,
2820                Flip::None,
2821                Crop {
2822                    src_rect: None,
2823                    dst_rect: Some(Rect::new(100, 100, 512, 288)),
2824                    dst_color: None,
2825                },
2826            )
2827            .unwrap();
2828
2829        compare_images(&gl_dst, &cpu_dst, 0.98, function!());
2830    }
2831
2832    #[test]
2833    #[cfg(target_os = "linux")]
2834    #[cfg(feature = "opengl")]
2835    fn test_opengl_all_rgba() {
2836        if !is_opengl_available() {
2837            eprintln!("SKIPPED: {} - OpenGL not available", function!());
2838            return;
2839        }
2840
2841        let dst_width = 640;
2842        let dst_height = 640;
2843        let file = include_bytes!("../../../testdata/zidane.jpg").to_vec();
2844
2845        let mut cpu_converter = CPUProcessor::new();
2846
2847        let mut gl_converter = GLProcessorThreaded::new(None).unwrap();
2848
2849        let mut mem = vec![None, Some(TensorMemory::Mem), Some(TensorMemory::Shm)];
2850        if is_dma_available() {
2851            mem.push(Some(TensorMemory::Dma));
2852        }
2853        for m in mem {
2854            let src = TensorImage::load_jpeg(&file, Some(RGBA), m).unwrap();
2855
2856            for rot in [
2857                Rotation::None,
2858                Rotation::Clockwise90,
2859                Rotation::Rotate180,
2860                Rotation::CounterClockwise90,
2861            ] {
2862                for flip in [Flip::None, Flip::Horizontal, Flip::Vertical] {
2863                    let mut cpu_dst = TensorImage::new(dst_width, dst_height, RGBA, m).unwrap();
2864                    let mut gl_dst = TensorImage::new(dst_width, dst_height, RGBA, m).unwrap();
2865                    cpu_dst.tensor.map().unwrap().as_mut_slice().fill(114);
2866                    gl_dst.tensor.map().unwrap().as_mut_slice().fill(114);
2867                    cpu_converter
2868                        .convert(
2869                            &src,
2870                            &mut cpu_dst,
2871                            Rotation::None,
2872                            Flip::None,
2873                            Crop {
2874                                src_rect: Some(Rect::new(50, 120, 1024, 576)),
2875                                dst_rect: Some(Rect::new(100, 100, 512, 288)),
2876                                dst_color: None,
2877                            },
2878                        )
2879                        .unwrap();
2880
2881                    gl_converter
2882                        .convert(
2883                            &src,
2884                            &mut gl_dst,
2885                            Rotation::None,
2886                            Flip::None,
2887                            Crop {
2888                                src_rect: Some(Rect::new(50, 120, 1024, 576)),
2889                                dst_rect: Some(Rect::new(100, 100, 512, 288)),
2890                                dst_color: None,
2891                            },
2892                        )
2893                        .map_err(|e| {
2894                            log::error!("error mem {m:?} rot {rot:?} error: {e:?}");
2895                            e
2896                        })
2897                        .unwrap();
2898
2899                    compare_images(
2900                        &gl_dst,
2901                        &cpu_dst,
2902                        0.98,
2903                        &format!("{} {:?} {:?}", function!(), rot, flip),
2904                    );
2905                }
2906            }
2907        }
2908    }
2909
2910    #[test]
2911    #[cfg(target_os = "linux")]
2912    fn test_cpu_rotate() {
2913        for rot in [
2914            Rotation::Clockwise90,
2915            Rotation::Rotate180,
2916            Rotation::CounterClockwise90,
2917        ] {
2918            test_cpu_rotate_(rot);
2919        }
2920    }
2921
2922    #[cfg(target_os = "linux")]
2923    fn test_cpu_rotate_(rot: Rotation) {
2924        // This test rotates the image 4 times and checks that the image was returned to
2925        // be the same Currently doesn't check if rotations actually rotated in
2926        // right direction
2927        let file = include_bytes!("../../../testdata/zidane.jpg").to_vec();
2928
2929        let unchanged_src = TensorImage::load_jpeg(&file, Some(RGBA), None).unwrap();
2930        let mut src = TensorImage::load_jpeg(&file, Some(RGBA), None).unwrap();
2931
2932        let (dst_width, dst_height) = match rot {
2933            Rotation::None | Rotation::Rotate180 => (src.width(), src.height()),
2934            Rotation::Clockwise90 | Rotation::CounterClockwise90 => (src.height(), src.width()),
2935        };
2936
2937        let mut cpu_dst = TensorImage::new(dst_width, dst_height, RGBA, None).unwrap();
2938        let mut cpu_converter = CPUProcessor::new();
2939
2940        // After rotating 4 times, the image should be the same as the original
2941
2942        cpu_converter
2943            .convert(&src, &mut cpu_dst, rot, Flip::None, Crop::no_crop())
2944            .unwrap();
2945
2946        cpu_converter
2947            .convert(&cpu_dst, &mut src, rot, Flip::None, Crop::no_crop())
2948            .unwrap();
2949
2950        cpu_converter
2951            .convert(&src, &mut cpu_dst, rot, Flip::None, Crop::no_crop())
2952            .unwrap();
2953
2954        cpu_converter
2955            .convert(&cpu_dst, &mut src, rot, Flip::None, Crop::no_crop())
2956            .unwrap();
2957
2958        compare_images(&src, &unchanged_src, 0.98, function!());
2959    }
2960
2961    #[test]
2962    #[cfg(target_os = "linux")]
2963    #[cfg(feature = "opengl")]
2964    fn test_opengl_rotate() {
2965        if !is_opengl_available() {
2966            eprintln!("SKIPPED: {} - OpenGL not available", function!());
2967            return;
2968        }
2969
2970        let size = (1280, 720);
2971        let mut mem = vec![None, Some(TensorMemory::Shm), Some(TensorMemory::Mem)];
2972
2973        if is_dma_available() {
2974            mem.push(Some(TensorMemory::Dma));
2975        }
2976        for m in mem {
2977            for rot in [
2978                Rotation::Clockwise90,
2979                Rotation::Rotate180,
2980                Rotation::CounterClockwise90,
2981            ] {
2982                test_opengl_rotate_(size, rot, m);
2983            }
2984        }
2985    }
2986
2987    #[cfg(target_os = "linux")]
2988    #[cfg(feature = "opengl")]
2989    fn test_opengl_rotate_(
2990        size: (usize, usize),
2991        rot: Rotation,
2992        tensor_memory: Option<TensorMemory>,
2993    ) {
2994        let (dst_width, dst_height) = match rot {
2995            Rotation::None | Rotation::Rotate180 => size,
2996            Rotation::Clockwise90 | Rotation::CounterClockwise90 => (size.1, size.0),
2997        };
2998
2999        let file = include_bytes!("../../../testdata/zidane.jpg").to_vec();
3000        let src = TensorImage::load_jpeg(&file, Some(RGBA), tensor_memory).unwrap();
3001
3002        let mut cpu_dst = TensorImage::new(dst_width, dst_height, RGBA, None).unwrap();
3003        let mut cpu_converter = CPUProcessor::new();
3004
3005        cpu_converter
3006            .convert(&src, &mut cpu_dst, rot, Flip::None, Crop::no_crop())
3007            .unwrap();
3008
3009        let mut gl_dst = TensorImage::new(dst_width, dst_height, RGBA, tensor_memory).unwrap();
3010        let mut gl_converter = GLProcessorThreaded::new(None).unwrap();
3011
3012        for _ in 0..5 {
3013            gl_converter
3014                .convert(&src, &mut gl_dst, rot, Flip::None, Crop::no_crop())
3015                .unwrap();
3016            compare_images(&gl_dst, &cpu_dst, 0.98, function!());
3017        }
3018    }
3019
3020    #[test]
3021    #[cfg(target_os = "linux")]
3022    fn test_g2d_rotate() {
3023        if !is_g2d_available() {
3024            eprintln!("SKIPPED: test_g2d_rotate - G2D library (libg2d.so.2) not available");
3025            return;
3026        }
3027        if !is_dma_available() {
3028            eprintln!(
3029                "SKIPPED: test_g2d_rotate - DMA memory allocation not available (permission denied or no DMA-BUF support)"
3030            );
3031            return;
3032        }
3033
3034        let size = (1280, 720);
3035        for rot in [
3036            Rotation::Clockwise90,
3037            Rotation::Rotate180,
3038            Rotation::CounterClockwise90,
3039        ] {
3040            test_g2d_rotate_(size, rot);
3041        }
3042    }
3043
3044    #[cfg(target_os = "linux")]
3045    fn test_g2d_rotate_(size: (usize, usize), rot: Rotation) {
3046        let (dst_width, dst_height) = match rot {
3047            Rotation::None | Rotation::Rotate180 => size,
3048            Rotation::Clockwise90 | Rotation::CounterClockwise90 => (size.1, size.0),
3049        };
3050
3051        let file = include_bytes!("../../../testdata/zidane.jpg").to_vec();
3052        let src = TensorImage::load_jpeg(&file, Some(RGBA), Some(TensorMemory::Dma)).unwrap();
3053
3054        let mut cpu_dst = TensorImage::new(dst_width, dst_height, RGBA, None).unwrap();
3055        let mut cpu_converter = CPUProcessor::new();
3056
3057        cpu_converter
3058            .convert(&src, &mut cpu_dst, rot, Flip::None, Crop::no_crop())
3059            .unwrap();
3060
3061        let mut g2d_dst =
3062            TensorImage::new(dst_width, dst_height, RGBA, Some(TensorMemory::Dma)).unwrap();
3063        let mut g2d_converter = G2DProcessor::new().unwrap();
3064
3065        g2d_converter
3066            .convert(&src, &mut g2d_dst, rot, Flip::None, Crop::no_crop())
3067            .unwrap();
3068
3069        compare_images(&g2d_dst, &cpu_dst, 0.98, function!());
3070    }
3071
3072    #[test]
3073    fn test_rgba_to_yuyv_resize_cpu() {
3074        let src = load_bytes_to_tensor(
3075            1280,
3076            720,
3077            RGBA,
3078            None,
3079            include_bytes!("../../../testdata/camera720p.rgba"),
3080        )
3081        .unwrap();
3082
3083        let (dst_width, dst_height) = (640, 360);
3084
3085        let mut dst = TensorImage::new(dst_width, dst_height, YUYV, None).unwrap();
3086
3087        let mut dst_through_yuyv = TensorImage::new(dst_width, dst_height, RGBA, None).unwrap();
3088        let mut dst_direct = TensorImage::new(dst_width, dst_height, RGBA, None).unwrap();
3089
3090        let mut cpu_converter = CPUProcessor::new();
3091
3092        cpu_converter
3093            .convert(&src, &mut dst, Rotation::None, Flip::None, Crop::no_crop())
3094            .unwrap();
3095
3096        cpu_converter
3097            .convert(
3098                &dst,
3099                &mut dst_through_yuyv,
3100                Rotation::None,
3101                Flip::None,
3102                Crop::no_crop(),
3103            )
3104            .unwrap();
3105
3106        cpu_converter
3107            .convert(
3108                &src,
3109                &mut dst_direct,
3110                Rotation::None,
3111                Flip::None,
3112                Crop::no_crop(),
3113            )
3114            .unwrap();
3115
3116        compare_images(&dst_through_yuyv, &dst_direct, 0.98, function!());
3117    }
3118
    #[test]
    #[cfg(target_os = "linux")]
    #[cfg(feature = "opengl")]
    #[ignore = "opengl doesn't support rendering to YUYV texture"]
    fn test_rgba_to_yuyv_resize_opengl() {
        // Attempts an RGBA->YUYV downscale through the threaded OpenGL path and
        // compares the result (converted to RGB) against a CPU conversion. The
        // test is `#[ignore]`d because OpenGL cannot render into a YUYV texture.
        if !is_opengl_available() {
            eprintln!("SKIPPED: {} - OpenGL not available", function!());
            return;
        }

        if !is_dma_available() {
            eprintln!(
                "SKIPPED: {} - DMA memory allocation not available (permission denied or no DMA-BUF support)",
                function!()
            );
            return;
        }

        // 1280x720 RGBA fixture loaded into default (non-DMA) memory.
        let src = load_bytes_to_tensor(
            1280,
            720,
            RGBA,
            None,
            include_bytes!("../../../testdata/camera720p.rgba"),
        )
        .unwrap();

        let (dst_width, dst_height) = (640, 360);

        // GL destination is DMA-backed.
        let mut dst =
            TensorImage::new(dst_width, dst_height, YUYV, Some(TensorMemory::Dma)).unwrap();

        let mut gl_converter = GLProcessorThreaded::new(None).unwrap();

        // NOTE(review): the GL pass draws into a 100x100 window at (100, 100)
        // with a white fill, while the CPU pass below uses Crop::no_crop() -
        // the two conversions are not symmetric, so the final comparison seems
        // unlikely to pass as written; confirm intent before un-ignoring.
        gl_converter
            .convert(
                &src,
                &mut dst,
                Rotation::None,
                Flip::None,
                Crop::new()
                    .with_dst_rect(Some(Rect::new(100, 100, 100, 100)))
                    .with_dst_color(Some([255, 255, 255, 255])),
            )
            .unwrap();

        // NOTE(review): writes a debug artifact into the working directory;
        // this looks like a leftover from manual inspection of the GL output.
        std::fs::write(
            "rgba_to_yuyv_opengl.yuyv",
            dst.tensor().map().unwrap().as_slice(),
        )
        .unwrap();
        let mut cpu_dst =
            TensorImage::new(dst_width, dst_height, YUYV, Some(TensorMemory::Dma)).unwrap();
        CPUProcessor::new()
            .convert(
                &src,
                &mut cpu_dst,
                Rotation::None,
                Flip::None,
                Crop::no_crop(),
            )
            .unwrap();

        // Compare after converting both YUYV buffers to RGB.
        compare_images_convert_to_rgb(&dst, &cpu_dst, 0.98, function!());
    }
3184
3185    #[test]
3186    #[cfg(target_os = "linux")]
3187    fn test_rgba_to_yuyv_resize_g2d() {
3188        if !is_g2d_available() {
3189            eprintln!(
3190                "SKIPPED: test_rgba_to_yuyv_resize_g2d - G2D library (libg2d.so.2) not available"
3191            );
3192            return;
3193        }
3194        if !is_dma_available() {
3195            eprintln!(
3196                "SKIPPED: test_rgba_to_yuyv_resize_g2d - DMA memory allocation not available (permission denied or no DMA-BUF support)"
3197            );
3198            return;
3199        }
3200
3201        let src = load_bytes_to_tensor(
3202            1280,
3203            720,
3204            RGBA,
3205            Some(TensorMemory::Dma),
3206            include_bytes!("../../../testdata/camera720p.rgba"),
3207        )
3208        .unwrap();
3209
3210        let (dst_width, dst_height) = (1280, 720);
3211
3212        let mut cpu_dst =
3213            TensorImage::new(dst_width, dst_height, YUYV, Some(TensorMemory::Dma)).unwrap();
3214
3215        let mut g2d_dst =
3216            TensorImage::new(dst_width, dst_height, YUYV, Some(TensorMemory::Dma)).unwrap();
3217
3218        let mut g2d_converter = G2DProcessor::new().unwrap();
3219
3220        g2d_dst.tensor.map().unwrap().as_mut_slice().fill(128);
3221        g2d_converter
3222            .convert(
3223                &src,
3224                &mut g2d_dst,
3225                Rotation::None,
3226                Flip::None,
3227                Crop {
3228                    src_rect: None,
3229                    dst_rect: Some(Rect::new(100, 100, 2, 2)),
3230                    dst_color: None,
3231                },
3232            )
3233            .unwrap();
3234
3235        cpu_dst.tensor.map().unwrap().as_mut_slice().fill(128);
3236        CPUProcessor::new()
3237            .convert(
3238                &src,
3239                &mut cpu_dst,
3240                Rotation::None,
3241                Flip::None,
3242                Crop {
3243                    src_rect: None,
3244                    dst_rect: Some(Rect::new(100, 100, 2, 2)),
3245                    dst_color: None,
3246                },
3247            )
3248            .unwrap();
3249
3250        compare_images_convert_to_rgb(&cpu_dst, &g2d_dst, 0.98, function!());
3251    }
3252
3253    #[test]
3254    fn test_yuyv_to_rgba_cpu() {
3255        let file = include_bytes!("../../../testdata/camera720p.yuyv").to_vec();
3256        let src = TensorImage::new(1280, 720, YUYV, None).unwrap();
3257        src.tensor()
3258            .map()
3259            .unwrap()
3260            .as_mut_slice()
3261            .copy_from_slice(&file);
3262
3263        let mut dst = TensorImage::new(1280, 720, RGBA, None).unwrap();
3264        let mut cpu_converter = CPUProcessor::new();
3265
3266        cpu_converter
3267            .convert(&src, &mut dst, Rotation::None, Flip::None, Crop::no_crop())
3268            .unwrap();
3269
3270        let target_image = TensorImage::new(1280, 720, RGBA, None).unwrap();
3271        target_image
3272            .tensor()
3273            .map()
3274            .unwrap()
3275            .as_mut_slice()
3276            .copy_from_slice(include_bytes!("../../../testdata/camera720p.rgba"));
3277
3278        compare_images(&dst, &target_image, 0.98, function!());
3279    }
3280
3281    #[test]
3282    fn test_yuyv_to_rgb_cpu() {
3283        let file = include_bytes!("../../../testdata/camera720p.yuyv").to_vec();
3284        let src = TensorImage::new(1280, 720, YUYV, None).unwrap();
3285        src.tensor()
3286            .map()
3287            .unwrap()
3288            .as_mut_slice()
3289            .copy_from_slice(&file);
3290
3291        let mut dst = TensorImage::new(1280, 720, RGB, None).unwrap();
3292        let mut cpu_converter = CPUProcessor::new();
3293
3294        cpu_converter
3295            .convert(&src, &mut dst, Rotation::None, Flip::None, Crop::no_crop())
3296            .unwrap();
3297
3298        let target_image = TensorImage::new(1280, 720, RGB, None).unwrap();
3299        target_image
3300            .tensor()
3301            .map()
3302            .unwrap()
3303            .as_mut_slice()
3304            .as_chunks_mut::<3>()
3305            .0
3306            .iter_mut()
3307            .zip(
3308                include_bytes!("../../../testdata/camera720p.rgba")
3309                    .as_chunks::<4>()
3310                    .0,
3311            )
3312            .for_each(|(dst, src)| *dst = [src[0], src[1], src[2]]);
3313
3314        compare_images(&dst, &target_image, 0.98, function!());
3315    }
3316
3317    #[test]
3318    #[cfg(target_os = "linux")]
3319    fn test_yuyv_to_rgba_g2d() {
3320        if !is_g2d_available() {
3321            eprintln!("SKIPPED: test_yuyv_to_rgba_g2d - G2D library (libg2d.so.2) not available");
3322            return;
3323        }
3324        if !is_dma_available() {
3325            eprintln!(
3326                "SKIPPED: test_yuyv_to_rgba_g2d - DMA memory allocation not available (permission denied or no DMA-BUF support)"
3327            );
3328            return;
3329        }
3330
3331        let src = load_bytes_to_tensor(
3332            1280,
3333            720,
3334            YUYV,
3335            None,
3336            include_bytes!("../../../testdata/camera720p.yuyv"),
3337        )
3338        .unwrap();
3339
3340        let mut dst = TensorImage::new(1280, 720, RGBA, Some(TensorMemory::Dma)).unwrap();
3341        let mut g2d_converter = G2DProcessor::new().unwrap();
3342
3343        g2d_converter
3344            .convert(&src, &mut dst, Rotation::None, Flip::None, Crop::no_crop())
3345            .unwrap();
3346
3347        let target_image = TensorImage::new(1280, 720, RGBA, None).unwrap();
3348        target_image
3349            .tensor()
3350            .map()
3351            .unwrap()
3352            .as_mut_slice()
3353            .copy_from_slice(include_bytes!("../../../testdata/camera720p.rgba"));
3354
3355        compare_images(&dst, &target_image, 0.98, function!());
3356    }
3357
3358    #[test]
3359    #[cfg(target_os = "linux")]
3360    #[cfg(feature = "opengl")]
3361    fn test_yuyv_to_rgba_opengl() {
3362        if !is_opengl_available() {
3363            eprintln!("SKIPPED: {} - OpenGL not available", function!());
3364            return;
3365        }
3366        if !is_dma_available() {
3367            eprintln!(
3368                "SKIPPED: {} - DMA memory allocation not available (permission denied or no DMA-BUF support)",
3369                function!()
3370            );
3371            return;
3372        }
3373
3374        let src = load_bytes_to_tensor(
3375            1280,
3376            720,
3377            YUYV,
3378            Some(TensorMemory::Dma),
3379            include_bytes!("../../../testdata/camera720p.yuyv"),
3380        )
3381        .unwrap();
3382
3383        let mut dst = TensorImage::new(1280, 720, RGBA, Some(TensorMemory::Dma)).unwrap();
3384        let mut gl_converter = GLProcessorThreaded::new(None).unwrap();
3385
3386        gl_converter
3387            .convert(&src, &mut dst, Rotation::None, Flip::None, Crop::no_crop())
3388            .unwrap();
3389
3390        let target_image = TensorImage::new(1280, 720, RGBA, None).unwrap();
3391        target_image
3392            .tensor()
3393            .map()
3394            .unwrap()
3395            .as_mut_slice()
3396            .copy_from_slice(include_bytes!("../../../testdata/camera720p.rgba"));
3397
3398        compare_images(&dst, &target_image, 0.98, function!());
3399    }
3400
3401    #[test]
3402    #[cfg(target_os = "linux")]
3403    fn test_yuyv_to_rgb_g2d() {
3404        if !is_g2d_available() {
3405            eprintln!("SKIPPED: test_yuyv_to_rgb_g2d - G2D library (libg2d.so.2) not available");
3406            return;
3407        }
3408        if !is_dma_available() {
3409            eprintln!(
3410                "SKIPPED: test_yuyv_to_rgb_g2d - DMA memory allocation not available (permission denied or no DMA-BUF support)"
3411            );
3412            return;
3413        }
3414
3415        let src = load_bytes_to_tensor(
3416            1280,
3417            720,
3418            YUYV,
3419            None,
3420            include_bytes!("../../../testdata/camera720p.yuyv"),
3421        )
3422        .unwrap();
3423
3424        let mut g2d_dst = TensorImage::new(1280, 720, RGB, Some(TensorMemory::Dma)).unwrap();
3425        let mut g2d_converter = G2DProcessor::new().unwrap();
3426
3427        g2d_converter
3428            .convert(
3429                &src,
3430                &mut g2d_dst,
3431                Rotation::None,
3432                Flip::None,
3433                Crop::no_crop(),
3434            )
3435            .unwrap();
3436
3437        let mut cpu_dst = TensorImage::new(1280, 720, RGB, None).unwrap();
3438        let mut cpu_converter: CPUProcessor = CPUProcessor::new();
3439
3440        cpu_converter
3441            .convert(
3442                &src,
3443                &mut cpu_dst,
3444                Rotation::None,
3445                Flip::None,
3446                Crop::no_crop(),
3447            )
3448            .unwrap();
3449
3450        compare_images(&g2d_dst, &cpu_dst, 0.98, function!());
3451    }
3452
3453    #[test]
3454    #[cfg(target_os = "linux")]
3455    fn test_yuyv_to_yuyv_resize_g2d() {
3456        if !is_g2d_available() {
3457            eprintln!(
3458                "SKIPPED: test_yuyv_to_yuyv_resize_g2d - G2D library (libg2d.so.2) not available"
3459            );
3460            return;
3461        }
3462        if !is_dma_available() {
3463            eprintln!(
3464                "SKIPPED: test_yuyv_to_yuyv_resize_g2d - DMA memory allocation not available (permission denied or no DMA-BUF support)"
3465            );
3466            return;
3467        }
3468
3469        let src = load_bytes_to_tensor(
3470            1280,
3471            720,
3472            YUYV,
3473            None,
3474            include_bytes!("../../../testdata/camera720p.yuyv"),
3475        )
3476        .unwrap();
3477
3478        let mut g2d_dst = TensorImage::new(600, 400, YUYV, Some(TensorMemory::Dma)).unwrap();
3479        let mut g2d_converter = G2DProcessor::new().unwrap();
3480
3481        g2d_converter
3482            .convert(
3483                &src,
3484                &mut g2d_dst,
3485                Rotation::None,
3486                Flip::None,
3487                Crop::no_crop(),
3488            )
3489            .unwrap();
3490
3491        let mut cpu_dst = TensorImage::new(600, 400, YUYV, None).unwrap();
3492        let mut cpu_converter: CPUProcessor = CPUProcessor::new();
3493
3494        cpu_converter
3495            .convert(
3496                &src,
3497                &mut cpu_dst,
3498                Rotation::None,
3499                Flip::None,
3500                Crop::no_crop(),
3501            )
3502            .unwrap();
3503
3504        // TODO: compare YUYV and YUYV images without having to convert them to RGB
3505        compare_images_convert_to_rgb(&g2d_dst, &cpu_dst, 0.98, function!());
3506    }
3507
3508    #[test]
3509    fn test_yuyv_to_rgba_resize_cpu() {
3510        let src = load_bytes_to_tensor(
3511            1280,
3512            720,
3513            YUYV,
3514            None,
3515            include_bytes!("../../../testdata/camera720p.yuyv"),
3516        )
3517        .unwrap();
3518
3519        let (dst_width, dst_height) = (960, 540);
3520
3521        let mut dst = TensorImage::new(dst_width, dst_height, RGBA, None).unwrap();
3522        let mut cpu_converter = CPUProcessor::new();
3523
3524        cpu_converter
3525            .convert(&src, &mut dst, Rotation::None, Flip::None, Crop::no_crop())
3526            .unwrap();
3527
3528        let mut dst_target = TensorImage::new(dst_width, dst_height, RGBA, None).unwrap();
3529        let src_target = load_bytes_to_tensor(
3530            1280,
3531            720,
3532            RGBA,
3533            None,
3534            include_bytes!("../../../testdata/camera720p.rgba"),
3535        )
3536        .unwrap();
3537        cpu_converter
3538            .convert(
3539                &src_target,
3540                &mut dst_target,
3541                Rotation::None,
3542                Flip::None,
3543                Crop::no_crop(),
3544            )
3545            .unwrap();
3546
3547        compare_images(&dst, &dst_target, 0.98, function!());
3548    }
3549
3550    #[test]
3551    #[cfg(target_os = "linux")]
3552    fn test_yuyv_to_rgba_crop_flip_g2d() {
3553        if !is_g2d_available() {
3554            eprintln!(
3555                "SKIPPED: test_yuyv_to_rgba_crop_flip_g2d - G2D library (libg2d.so.2) not available"
3556            );
3557            return;
3558        }
3559        if !is_dma_available() {
3560            eprintln!(
3561                "SKIPPED: test_yuyv_to_rgba_crop_flip_g2d - DMA memory allocation not available (permission denied or no DMA-BUF support)"
3562            );
3563            return;
3564        }
3565
3566        let src = load_bytes_to_tensor(
3567            1280,
3568            720,
3569            YUYV,
3570            Some(TensorMemory::Dma),
3571            include_bytes!("../../../testdata/camera720p.yuyv"),
3572        )
3573        .unwrap();
3574
3575        let (dst_width, dst_height) = (640, 640);
3576
3577        let mut dst_g2d =
3578            TensorImage::new(dst_width, dst_height, RGBA, Some(TensorMemory::Dma)).unwrap();
3579        let mut g2d_converter = G2DProcessor::new().unwrap();
3580
3581        g2d_converter
3582            .convert(
3583                &src,
3584                &mut dst_g2d,
3585                Rotation::None,
3586                Flip::Horizontal,
3587                Crop {
3588                    src_rect: Some(Rect {
3589                        left: 20,
3590                        top: 15,
3591                        width: 400,
3592                        height: 300,
3593                    }),
3594                    dst_rect: None,
3595                    dst_color: None,
3596                },
3597            )
3598            .unwrap();
3599
3600        let mut dst_cpu =
3601            TensorImage::new(dst_width, dst_height, RGBA, Some(TensorMemory::Dma)).unwrap();
3602        let mut cpu_converter = CPUProcessor::new();
3603
3604        cpu_converter
3605            .convert(
3606                &src,
3607                &mut dst_cpu,
3608                Rotation::None,
3609                Flip::Horizontal,
3610                Crop {
3611                    src_rect: Some(Rect {
3612                        left: 20,
3613                        top: 15,
3614                        width: 400,
3615                        height: 300,
3616                    }),
3617                    dst_rect: None,
3618                    dst_color: None,
3619                },
3620            )
3621            .unwrap();
3622        compare_images(&dst_g2d, &dst_cpu, 0.98, function!());
3623    }
3624
3625    #[test]
3626    #[cfg(target_os = "linux")]
3627    #[cfg(feature = "opengl")]
3628    fn test_yuyv_to_rgba_crop_flip_opengl() {
3629        if !is_opengl_available() {
3630            eprintln!("SKIPPED: {} - OpenGL not available", function!());
3631            return;
3632        }
3633
3634        if !is_dma_available() {
3635            eprintln!(
3636                "SKIPPED: {} - DMA memory allocation not available (permission denied or no DMA-BUF support)",
3637                function!()
3638            );
3639            return;
3640        }
3641
3642        let src = load_bytes_to_tensor(
3643            1280,
3644            720,
3645            YUYV,
3646            Some(TensorMemory::Dma),
3647            include_bytes!("../../../testdata/camera720p.yuyv"),
3648        )
3649        .unwrap();
3650
3651        let (dst_width, dst_height) = (640, 640);
3652
3653        let mut dst_gl =
3654            TensorImage::new(dst_width, dst_height, RGBA, Some(TensorMemory::Dma)).unwrap();
3655        let mut gl_converter = GLProcessorThreaded::new(None).unwrap();
3656
3657        gl_converter
3658            .convert(
3659                &src,
3660                &mut dst_gl,
3661                Rotation::None,
3662                Flip::Horizontal,
3663                Crop {
3664                    src_rect: Some(Rect {
3665                        left: 20,
3666                        top: 15,
3667                        width: 400,
3668                        height: 300,
3669                    }),
3670                    dst_rect: None,
3671                    dst_color: None,
3672                },
3673            )
3674            .unwrap();
3675
3676        let mut dst_cpu =
3677            TensorImage::new(dst_width, dst_height, RGBA, Some(TensorMemory::Dma)).unwrap();
3678        let mut cpu_converter = CPUProcessor::new();
3679
3680        cpu_converter
3681            .convert(
3682                &src,
3683                &mut dst_cpu,
3684                Rotation::None,
3685                Flip::Horizontal,
3686                Crop {
3687                    src_rect: Some(Rect {
3688                        left: 20,
3689                        top: 15,
3690                        width: 400,
3691                        height: 300,
3692                    }),
3693                    dst_rect: None,
3694                    dst_color: None,
3695                },
3696            )
3697            .unwrap();
3698        compare_images(&dst_gl, &dst_cpu, 0.98, function!());
3699    }
3700
3701    #[test]
3702    fn test_vyuy_to_rgba_cpu() {
3703        let file = include_bytes!("../../../testdata/camera720p.vyuy").to_vec();
3704        let src = TensorImage::new(1280, 720, VYUY, None).unwrap();
3705        src.tensor()
3706            .map()
3707            .unwrap()
3708            .as_mut_slice()
3709            .copy_from_slice(&file);
3710
3711        let mut dst = TensorImage::new(1280, 720, RGBA, None).unwrap();
3712        let mut cpu_converter = CPUProcessor::new();
3713
3714        cpu_converter
3715            .convert(&src, &mut dst, Rotation::None, Flip::None, Crop::no_crop())
3716            .unwrap();
3717
3718        let target_image = TensorImage::new(1280, 720, RGBA, None).unwrap();
3719        target_image
3720            .tensor()
3721            .map()
3722            .unwrap()
3723            .as_mut_slice()
3724            .copy_from_slice(include_bytes!("../../../testdata/camera720p.rgba"));
3725
3726        compare_images(&dst, &target_image, 0.98, function!());
3727    }
3728
3729    #[test]
3730    fn test_vyuy_to_rgb_cpu() {
3731        let file = include_bytes!("../../../testdata/camera720p.vyuy").to_vec();
3732        let src = TensorImage::new(1280, 720, VYUY, None).unwrap();
3733        src.tensor()
3734            .map()
3735            .unwrap()
3736            .as_mut_slice()
3737            .copy_from_slice(&file);
3738
3739        let mut dst = TensorImage::new(1280, 720, RGB, None).unwrap();
3740        let mut cpu_converter = CPUProcessor::new();
3741
3742        cpu_converter
3743            .convert(&src, &mut dst, Rotation::None, Flip::None, Crop::no_crop())
3744            .unwrap();
3745
3746        let target_image = TensorImage::new(1280, 720, RGB, None).unwrap();
3747        target_image
3748            .tensor()
3749            .map()
3750            .unwrap()
3751            .as_mut_slice()
3752            .as_chunks_mut::<3>()
3753            .0
3754            .iter_mut()
3755            .zip(
3756                include_bytes!("../../../testdata/camera720p.rgba")
3757                    .as_chunks::<4>()
3758                    .0,
3759            )
3760            .for_each(|(dst, src)| *dst = [src[0], src[1], src[2]]);
3761
3762        compare_images(&dst, &target_image, 0.98, function!());
3763    }
3764
3765    #[test]
3766    #[cfg(target_os = "linux")]
3767    fn test_vyuy_to_rgba_g2d() {
3768        if !is_g2d_available() {
3769            eprintln!("SKIPPED: test_vyuy_to_rgba_g2d - G2D library (libg2d.so.2) not available");
3770            return;
3771        }
3772        if !is_dma_available() {
3773            eprintln!(
3774                "SKIPPED: test_vyuy_to_rgba_g2d - DMA memory allocation not available (permission denied or no DMA-BUF support)"
3775            );
3776            return;
3777        }
3778
3779        let src = load_bytes_to_tensor(
3780            1280,
3781            720,
3782            VYUY,
3783            None,
3784            include_bytes!("../../../testdata/camera720p.vyuy"),
3785        )
3786        .unwrap();
3787
3788        let mut dst = TensorImage::new(1280, 720, RGBA, Some(TensorMemory::Dma)).unwrap();
3789        let mut g2d_converter = G2DProcessor::new().unwrap();
3790
3791        match g2d_converter.convert(&src, &mut dst, Rotation::None, Flip::None, Crop::no_crop()) {
3792            Err(Error::G2D(_)) => {
3793                eprintln!("SKIPPED: test_vyuy_to_rgba_g2d - G2D does not support VYUY format");
3794                return;
3795            }
3796            r => r.unwrap(),
3797        }
3798
3799        let target_image = TensorImage::new(1280, 720, RGBA, None).unwrap();
3800        target_image
3801            .tensor()
3802            .map()
3803            .unwrap()
3804            .as_mut_slice()
3805            .copy_from_slice(include_bytes!("../../../testdata/camera720p.rgba"));
3806
3807        compare_images(&dst, &target_image, 0.98, function!());
3808    }
3809
3810    #[test]
3811    #[cfg(target_os = "linux")]
3812    fn test_vyuy_to_rgb_g2d() {
3813        if !is_g2d_available() {
3814            eprintln!("SKIPPED: test_vyuy_to_rgb_g2d - G2D library (libg2d.so.2) not available");
3815            return;
3816        }
3817        if !is_dma_available() {
3818            eprintln!(
3819                "SKIPPED: test_vyuy_to_rgb_g2d - DMA memory allocation not available (permission denied or no DMA-BUF support)"
3820            );
3821            return;
3822        }
3823
3824        let src = load_bytes_to_tensor(
3825            1280,
3826            720,
3827            VYUY,
3828            None,
3829            include_bytes!("../../../testdata/camera720p.vyuy"),
3830        )
3831        .unwrap();
3832
3833        let mut g2d_dst = TensorImage::new(1280, 720, RGB, Some(TensorMemory::Dma)).unwrap();
3834        let mut g2d_converter = G2DProcessor::new().unwrap();
3835
3836        match g2d_converter.convert(
3837            &src,
3838            &mut g2d_dst,
3839            Rotation::None,
3840            Flip::None,
3841            Crop::no_crop(),
3842        ) {
3843            Err(Error::G2D(_)) => {
3844                eprintln!("SKIPPED: test_vyuy_to_rgb_g2d - G2D does not support VYUY format");
3845                return;
3846            }
3847            r => r.unwrap(),
3848        }
3849
3850        let mut cpu_dst = TensorImage::new(1280, 720, RGB, None).unwrap();
3851        let mut cpu_converter: CPUProcessor = CPUProcessor::new();
3852
3853        cpu_converter
3854            .convert(
3855                &src,
3856                &mut cpu_dst,
3857                Rotation::None,
3858                Flip::None,
3859                Crop::no_crop(),
3860            )
3861            .unwrap();
3862
3863        compare_images(&g2d_dst, &cpu_dst, 0.98, function!());
3864    }
3865
3866    #[test]
3867    #[cfg(target_os = "linux")]
3868    #[cfg(feature = "opengl")]
3869    fn test_vyuy_to_rgba_opengl() {
3870        if !is_opengl_available() {
3871            eprintln!("SKIPPED: {} - OpenGL not available", function!());
3872            return;
3873        }
3874        if !is_dma_available() {
3875            eprintln!(
3876                "SKIPPED: {} - DMA memory allocation not available (permission denied or no DMA-BUF support)",
3877                function!()
3878            );
3879            return;
3880        }
3881
3882        let src = load_bytes_to_tensor(
3883            1280,
3884            720,
3885            VYUY,
3886            Some(TensorMemory::Dma),
3887            include_bytes!("../../../testdata/camera720p.vyuy"),
3888        )
3889        .unwrap();
3890
3891        let mut dst = TensorImage::new(1280, 720, RGBA, Some(TensorMemory::Dma)).unwrap();
3892        let mut gl_converter = GLProcessorThreaded::new(None).unwrap();
3893
3894        match gl_converter.convert(&src, &mut dst, Rotation::None, Flip::None, Crop::no_crop()) {
3895            Err(Error::NotSupported(_)) => {
3896                eprintln!(
3897                    "SKIPPED: {} - OpenGL does not support VYUY DMA format",
3898                    function!()
3899                );
3900                return;
3901            }
3902            r => r.unwrap(),
3903        }
3904
3905        let target_image = TensorImage::new(1280, 720, RGBA, None).unwrap();
3906        target_image
3907            .tensor()
3908            .map()
3909            .unwrap()
3910            .as_mut_slice()
3911            .copy_from_slice(include_bytes!("../../../testdata/camera720p.rgba"));
3912
3913        compare_images(&dst, &target_image, 0.98, function!());
3914    }
3915
3916    #[test]
3917    fn test_nv12_to_rgba_cpu() {
3918        let file = include_bytes!("../../../testdata/zidane.nv12").to_vec();
3919        let src = TensorImage::new(1280, 720, NV12, None).unwrap();
3920        src.tensor().map().unwrap().as_mut_slice()[0..(1280 * 720 * 3 / 2)].copy_from_slice(&file);
3921
3922        let mut dst = TensorImage::new(1280, 720, RGBA, None).unwrap();
3923        let mut cpu_converter = CPUProcessor::new();
3924
3925        cpu_converter
3926            .convert(&src, &mut dst, Rotation::None, Flip::None, Crop::no_crop())
3927            .unwrap();
3928
3929        let target_image = TensorImage::load_jpeg(
3930            include_bytes!("../../../testdata/zidane.jpg"),
3931            Some(RGBA),
3932            None,
3933        )
3934        .unwrap();
3935
3936        compare_images(&dst, &target_image, 0.98, function!());
3937    }
3938
3939    #[test]
3940    fn test_nv12_to_rgb_cpu() {
3941        let file = include_bytes!("../../../testdata/zidane.nv12").to_vec();
3942        let src = TensorImage::new(1280, 720, NV12, None).unwrap();
3943        src.tensor().map().unwrap().as_mut_slice()[0..(1280 * 720 * 3 / 2)].copy_from_slice(&file);
3944
3945        let mut dst = TensorImage::new(1280, 720, RGB, None).unwrap();
3946        let mut cpu_converter = CPUProcessor::new();
3947
3948        cpu_converter
3949            .convert(&src, &mut dst, Rotation::None, Flip::None, Crop::no_crop())
3950            .unwrap();
3951
3952        let target_image = TensorImage::load_jpeg(
3953            include_bytes!("../../../testdata/zidane.jpg"),
3954            Some(RGB),
3955            None,
3956        )
3957        .unwrap();
3958
3959        compare_images(&dst, &target_image, 0.98, function!());
3960    }
3961
3962    #[test]
3963    fn test_nv12_to_grey_cpu() {
3964        let file = include_bytes!("../../../testdata/zidane.nv12").to_vec();
3965        let src = TensorImage::new(1280, 720, NV12, None).unwrap();
3966        src.tensor().map().unwrap().as_mut_slice()[0..(1280 * 720 * 3 / 2)].copy_from_slice(&file);
3967
3968        let mut dst = TensorImage::new(1280, 720, GREY, None).unwrap();
3969        let mut cpu_converter = CPUProcessor::new();
3970
3971        cpu_converter
3972            .convert(&src, &mut dst, Rotation::None, Flip::None, Crop::no_crop())
3973            .unwrap();
3974
3975        let target_image = TensorImage::load_jpeg(
3976            include_bytes!("../../../testdata/zidane.jpg"),
3977            Some(GREY),
3978            None,
3979        )
3980        .unwrap();
3981
3982        compare_images(&dst, &target_image, 0.98, function!());
3983    }
3984
3985    #[test]
3986    fn test_nv12_to_yuyv_cpu() {
3987        let file = include_bytes!("../../../testdata/zidane.nv12").to_vec();
3988        let src = TensorImage::new(1280, 720, NV12, None).unwrap();
3989        src.tensor().map().unwrap().as_mut_slice()[0..(1280 * 720 * 3 / 2)].copy_from_slice(&file);
3990
3991        let mut dst = TensorImage::new(1280, 720, YUYV, None).unwrap();
3992        let mut cpu_converter = CPUProcessor::new();
3993
3994        cpu_converter
3995            .convert(&src, &mut dst, Rotation::None, Flip::None, Crop::no_crop())
3996            .unwrap();
3997
3998        let target_image = TensorImage::load_jpeg(
3999            include_bytes!("../../../testdata/zidane.jpg"),
4000            Some(RGB),
4001            None,
4002        )
4003        .unwrap();
4004
4005        compare_images_convert_to_rgb(&dst, &target_image, 0.98, function!());
4006    }
4007
    #[test]
    fn test_cpu_resize_planar_rgb() {
        // Letterboxed CPU conversion: a 4x4 interleaved-RGBA source is written
        // into a 4x4 dst_rect at offset (1, 1) inside a 5x5 planar-RGB
        // destination, and the uncovered border is filled from dst_color.
        let src = TensorImage::new(4, 4, RGBA, None).unwrap();
        // 4x4 RGBA source, four bytes (R, G, B, A) per pixel, one pixel per
        // visual group below.
        #[rustfmt::skip]
        let src_image = [
                    255, 0, 0, 255,     0, 255, 0, 255,     0, 0, 255, 255,     255, 255, 0, 255,
                    255, 0, 0, 0,       0, 0, 0, 255,       255,  0, 255, 0,    255, 0, 255, 255,
                    0, 0, 255, 0,       0, 255, 255, 255,   255, 255, 0, 0,     0, 0, 0, 255,
                    255, 0, 0, 0,       0, 0, 0, 255,       255,  0, 255, 0,    255, 0, 255, 255,
        ];
        src.tensor()
            .map()
            .unwrap()
            .as_mut_slice()
            .copy_from_slice(&src_image);

        let mut cpu_dst = TensorImage::new(5, 5, PLANAR_RGB, None).unwrap();
        let mut cpu_converter = CPUProcessor::new();

        cpu_converter
            .convert(
                &src,
                &mut cpu_dst,
                Rotation::None,
                Flip::None,
                // Place the source inside a 4x4 window at (1, 1); everything
                // outside the window is filled with the 114 grey dst_color.
                Crop::new()
                    .with_dst_rect(Some(Rect {
                        left: 1,
                        top: 1,
                        width: 4,
                        height: 4,
                    }))
                    .with_dst_color(Some([114, 114, 114, 255])),
            )
            .unwrap();

        // Expected planar output: 75 bytes for the 5x5x3 destination, with 114
        // fill along the uncovered border. NOTE(review): the grouping below
        // mirrors the CPU planar packing order — confirm against
        // CPUProcessor's planar conversion if the layout ever changes.
        #[rustfmt::skip]
        let expected_dst = [
            114, 114, 114, 114, 114,    114, 255, 0, 0, 255,    114, 255, 0, 255, 255,      114, 0, 0, 255, 0,        114, 255, 0, 255, 255,
            114, 114, 114, 114, 114,    114, 0, 255, 0, 255,    114, 0, 0, 0, 0,            114, 0, 255, 255, 0,      114, 0, 0, 0, 0,
            114, 114, 114, 114, 114,    114, 0, 0, 255, 0,      114, 0, 0, 255, 255,        114, 255, 255, 0, 0,      114, 0, 0, 255, 255,
        ];

        assert_eq!(cpu_dst.tensor().map().unwrap().as_slice(), &expected_dst);
    }
4053
    #[test]
    fn test_cpu_resize_planar_rgba() {
        // Letterboxed CPU conversion: a 4x4 interleaved-RGBA source is written
        // into a 4x4 dst_rect at offset (1, 1) inside a 5x5 planar-RGBA
        // destination, and the uncovered border is filled from dst_color.
        let src = TensorImage::new(4, 4, RGBA, None).unwrap();
        // 4x4 RGBA source, four bytes (R, G, B, A) per pixel, one pixel per
        // visual group below.
        #[rustfmt::skip]
        let src_image = [
                    255, 0, 0, 255,     0, 255, 0, 255,     0, 0, 255, 255,     255, 255, 0, 255,
                    255, 0, 0, 0,       0, 0, 0, 255,       255,  0, 255, 0,    255, 0, 255, 255,
                    0, 0, 255, 0,       0, 255, 255, 255,   255, 255, 0, 0,     0, 0, 0, 255,
                    255, 0, 0, 0,       0, 0, 0, 255,       255,  0, 255, 0,    255, 0, 255, 255,
        ];
        src.tensor()
            .map()
            .unwrap()
            .as_mut_slice()
            .copy_from_slice(&src_image);

        let mut cpu_dst = TensorImage::new(5, 5, PLANAR_RGBA, None).unwrap();
        let mut cpu_converter = CPUProcessor::new();

        cpu_converter
            .convert(
                &src,
                &mut cpu_dst,
                Rotation::None,
                Flip::None,
                // Place the source inside a 4x4 window at (1, 1); everything
                // outside the window is filled with the dst_color (grey with
                // opaque alpha).
                Crop::new()
                    .with_dst_rect(Some(Rect {
                        left: 1,
                        top: 1,
                        width: 4,
                        height: 4,
                    }))
                    .with_dst_color(Some([114, 114, 114, 255])),
            )
            .unwrap();

        // Expected planar output: 100 bytes for the 5x5x4 destination — one
        // additional line versus the planar-RGB test for the alpha plane.
        // NOTE(review): the grouping below mirrors the CPU planar packing
        // order — confirm against CPUProcessor's planar conversion if the
        // layout ever changes.
        #[rustfmt::skip]
        let expected_dst = [
            114, 114, 114, 114, 114,    114, 255, 0, 0, 255,        114, 255, 0, 255, 255,      114, 0, 0, 255, 0,        114, 255, 0, 255, 255,
            114, 114, 114, 114, 114,    114, 0, 255, 0, 255,        114, 0, 0, 0, 0,            114, 0, 255, 255, 0,      114, 0, 0, 0, 0,
            114, 114, 114, 114, 114,    114, 0, 0, 255, 0,          114, 0, 0, 255, 255,        114, 255, 255, 0, 0,      114, 0, 0, 255, 255,
            255, 255, 255, 255, 255,    255, 255, 255, 255, 255,    255, 0, 255, 0, 255,        255, 0, 255, 0, 255,      255, 0, 255, 0, 255,
        ];

        assert_eq!(cpu_dst.tensor().map().unwrap().as_slice(), &expected_dst);
    }
4100
4101    #[test]
4102    #[cfg(target_os = "linux")]
4103    #[cfg(feature = "opengl")]
4104    fn test_opengl_resize_planar_rgb() {
4105        if !is_opengl_available() {
4106            eprintln!("SKIPPED: {} - OpenGL not available", function!());
4107            return;
4108        }
4109
4110        if !is_dma_available() {
4111            eprintln!(
4112                "SKIPPED: {} - DMA memory allocation not available (permission denied or no DMA-BUF support)",
4113                function!()
4114            );
4115            return;
4116        }
4117
4118        let dst_width = 640;
4119        let dst_height = 640;
4120        let file = include_bytes!("../../../testdata/test_image.jpg").to_vec();
4121        let src = TensorImage::load_jpeg(&file, Some(RGBA), None).unwrap();
4122
4123        let mut cpu_dst = TensorImage::new(dst_width, dst_height, PLANAR_RGB, None).unwrap();
4124        let mut cpu_converter = CPUProcessor::new();
4125        cpu_converter
4126            .convert(
4127                &src,
4128                &mut cpu_dst,
4129                Rotation::None,
4130                Flip::None,
4131                Crop::no_crop(),
4132            )
4133            .unwrap();
4134        cpu_converter
4135            .convert(
4136                &src,
4137                &mut cpu_dst,
4138                Rotation::None,
4139                Flip::None,
4140                Crop::new()
4141                    .with_dst_rect(Some(Rect {
4142                        left: 102,
4143                        top: 102,
4144                        width: 440,
4145                        height: 440,
4146                    }))
4147                    .with_dst_color(Some([114, 114, 114, 114])),
4148            )
4149            .unwrap();
4150
4151        let mut gl_dst = TensorImage::new(dst_width, dst_height, PLANAR_RGB, None).unwrap();
4152        let mut gl_converter = GLProcessorThreaded::new(None).unwrap();
4153
4154        gl_converter
4155            .convert(
4156                &src,
4157                &mut gl_dst,
4158                Rotation::None,
4159                Flip::None,
4160                Crop::new()
4161                    .with_dst_rect(Some(Rect {
4162                        left: 102,
4163                        top: 102,
4164                        width: 440,
4165                        height: 440,
4166                    }))
4167                    .with_dst_color(Some([114, 114, 114, 114])),
4168            )
4169            .unwrap();
4170        compare_images(&gl_dst, &cpu_dst, 0.98, function!());
4171    }
4172
4173    #[test]
4174    fn test_cpu_resize_nv16() {
4175        let file = include_bytes!("../../../testdata/zidane.jpg").to_vec();
4176        let src = TensorImage::load_jpeg(&file, Some(RGBA), None).unwrap();
4177
4178        let mut cpu_nv16_dst = TensorImage::new(640, 640, NV16, None).unwrap();
4179        let mut cpu_rgb_dst = TensorImage::new(640, 640, RGB, None).unwrap();
4180        let mut cpu_converter = CPUProcessor::new();
4181
4182        cpu_converter
4183            .convert(
4184                &src,
4185                &mut cpu_nv16_dst,
4186                Rotation::None,
4187                Flip::None,
4188                // Crop::no_crop(),
4189                Crop::new()
4190                    .with_dst_rect(Some(Rect {
4191                        left: 20,
4192                        top: 140,
4193                        width: 600,
4194                        height: 360,
4195                    }))
4196                    .with_dst_color(Some([255, 128, 0, 255])),
4197            )
4198            .unwrap();
4199
4200        cpu_converter
4201            .convert(
4202                &src,
4203                &mut cpu_rgb_dst,
4204                Rotation::None,
4205                Flip::None,
4206                Crop::new()
4207                    .with_dst_rect(Some(Rect {
4208                        left: 20,
4209                        top: 140,
4210                        width: 600,
4211                        height: 360,
4212                    }))
4213                    .with_dst_color(Some([255, 128, 0, 255])),
4214            )
4215            .unwrap();
4216        compare_images_convert_to_rgb(&cpu_nv16_dst, &cpu_rgb_dst, 0.99, function!());
4217    }
4218
4219    fn load_bytes_to_tensor(
4220        width: usize,
4221        height: usize,
4222        fourcc: FourCharCode,
4223        memory: Option<TensorMemory>,
4224        bytes: &[u8],
4225    ) -> Result<TensorImage, Error> {
4226        let src = TensorImage::new(width, height, fourcc, memory)?;
4227        src.tensor().map()?.as_mut_slice().copy_from_slice(bytes);
4228        Ok(src)
4229    }
4230
4231    fn compare_images(img1: &TensorImage, img2: &TensorImage, threshold: f64, name: &str) {
4232        assert_eq!(img1.height(), img2.height(), "Heights differ");
4233        assert_eq!(img1.width(), img2.width(), "Widths differ");
4234        assert_eq!(img1.fourcc(), img2.fourcc(), "FourCC differ");
4235        assert!(
4236            matches!(img1.fourcc(), RGB | RGBA | GREY | PLANAR_RGB),
4237            "FourCC must be RGB or RGBA for comparison"
4238        );
4239
4240        let image1 = match img1.fourcc() {
4241            RGB => image::RgbImage::from_vec(
4242                img1.width() as u32,
4243                img1.height() as u32,
4244                img1.tensor().map().unwrap().to_vec(),
4245            )
4246            .unwrap(),
4247            RGBA => image::RgbaImage::from_vec(
4248                img1.width() as u32,
4249                img1.height() as u32,
4250                img1.tensor().map().unwrap().to_vec(),
4251            )
4252            .unwrap()
4253            .convert(),
4254            GREY => image::GrayImage::from_vec(
4255                img1.width() as u32,
4256                img1.height() as u32,
4257                img1.tensor().map().unwrap().to_vec(),
4258            )
4259            .unwrap()
4260            .convert(),
4261            PLANAR_RGB => image::GrayImage::from_vec(
4262                img1.width() as u32,
4263                (img1.height() * 3) as u32,
4264                img1.tensor().map().unwrap().to_vec(),
4265            )
4266            .unwrap()
4267            .convert(),
4268            _ => return,
4269        };
4270
4271        let image2 = match img2.fourcc() {
4272            RGB => image::RgbImage::from_vec(
4273                img2.width() as u32,
4274                img2.height() as u32,
4275                img2.tensor().map().unwrap().to_vec(),
4276            )
4277            .unwrap(),
4278            RGBA => image::RgbaImage::from_vec(
4279                img2.width() as u32,
4280                img2.height() as u32,
4281                img2.tensor().map().unwrap().to_vec(),
4282            )
4283            .unwrap()
4284            .convert(),
4285            GREY => image::GrayImage::from_vec(
4286                img2.width() as u32,
4287                img2.height() as u32,
4288                img2.tensor().map().unwrap().to_vec(),
4289            )
4290            .unwrap()
4291            .convert(),
4292            PLANAR_RGB => image::GrayImage::from_vec(
4293                img2.width() as u32,
4294                (img2.height() * 3) as u32,
4295                img2.tensor().map().unwrap().to_vec(),
4296            )
4297            .unwrap()
4298            .convert(),
4299            _ => return,
4300        };
4301
4302        let similarity = image_compare::rgb_similarity_structure(
4303            &image_compare::Algorithm::RootMeanSquared,
4304            &image1,
4305            &image2,
4306        )
4307        .expect("Image Comparison failed");
4308        if similarity.score < threshold {
4309            // image1.save(format!("{name}_1.png"));
4310            // image2.save(format!("{name}_2.png"));
4311            similarity
4312                .image
4313                .to_color_map()
4314                .save(format!("{name}.png"))
4315                .unwrap();
4316            panic!(
4317                "{name}: converted image and target image have similarity score too low: {} < {}",
4318                similarity.score, threshold
4319            )
4320        }
4321    }
4322
4323    fn compare_images_convert_to_rgb(
4324        img1: &TensorImage,
4325        img2: &TensorImage,
4326        threshold: f64,
4327        name: &str,
4328    ) {
4329        assert_eq!(img1.height(), img2.height(), "Heights differ");
4330        assert_eq!(img1.width(), img2.width(), "Widths differ");
4331
4332        let mut img_rgb1 =
4333            TensorImage::new(img1.width(), img1.height(), RGB, Some(TensorMemory::Mem)).unwrap();
4334        let mut img_rgb2 =
4335            TensorImage::new(img1.width(), img1.height(), RGB, Some(TensorMemory::Mem)).unwrap();
4336        CPUProcessor::convert_format(img1, &mut img_rgb1).unwrap();
4337        CPUProcessor::convert_format(img2, &mut img_rgb2).unwrap();
4338
4339        let image1 = image::RgbImage::from_vec(
4340            img_rgb1.width() as u32,
4341            img_rgb1.height() as u32,
4342            img_rgb1.tensor().map().unwrap().to_vec(),
4343        )
4344        .unwrap();
4345
4346        let image2 = image::RgbImage::from_vec(
4347            img_rgb2.width() as u32,
4348            img_rgb2.height() as u32,
4349            img_rgb2.tensor().map().unwrap().to_vec(),
4350        )
4351        .unwrap();
4352
4353        let similarity = image_compare::rgb_similarity_structure(
4354            &image_compare::Algorithm::RootMeanSquared,
4355            &image1,
4356            &image2,
4357        )
4358        .expect("Image Comparison failed");
4359        if similarity.score < threshold {
4360            // image1.save(format!("{name}_1.png"));
4361            // image2.save(format!("{name}_2.png"));
4362            similarity
4363                .image
4364                .to_color_map()
4365                .save(format!("{name}.png"))
4366                .unwrap();
4367            panic!(
4368                "{name}: converted image and target image have similarity score too low: {} < {}",
4369                similarity.score, threshold
4370            )
4371        }
4372    }
4373
4374    // =========================================================================
4375    // NV12 Format Tests
4376    // =========================================================================
4377
4378    #[test]
4379    fn test_nv12_tensor_image_creation() {
4380        let width = 640;
4381        let height = 480;
4382        let img = TensorImage::new(width, height, NV12, None).unwrap();
4383
4384        assert_eq!(img.width(), width);
4385        assert_eq!(img.height(), height);
4386        assert_eq!(img.fourcc(), NV12);
4387        // NV12 uses shape [H*3/2, W] to store Y plane + UV plane
4388        assert_eq!(img.tensor().shape(), &[height * 3 / 2, width]);
4389    }
4390
4391    #[test]
4392    fn test_nv12_channels() {
4393        let img = TensorImage::new(640, 480, NV12, None).unwrap();
4394        // NV12 reports 2 channels (Y + interleaved UV)
4395        assert_eq!(img.channels(), 2);
4396    }
4397
4398    // =========================================================================
4399    // TensorImageRef Tests
4400    // =========================================================================
4401
4402    #[test]
4403    fn test_tensor_image_ref_from_planar_tensor() {
4404        // Create a planar RGB tensor [3, 480, 640]
4405        let mut tensor = Tensor::<u8>::new(&[3, 480, 640], None, None).unwrap();
4406
4407        let img_ref = TensorImageRef::from_borrowed_tensor(&mut tensor, PLANAR_RGB).unwrap();
4408
4409        assert_eq!(img_ref.width(), 640);
4410        assert_eq!(img_ref.height(), 480);
4411        assert_eq!(img_ref.channels(), 3);
4412        assert_eq!(img_ref.fourcc(), PLANAR_RGB);
4413        assert!(img_ref.is_planar());
4414    }
4415
4416    #[test]
4417    fn test_tensor_image_ref_from_interleaved_tensor() {
4418        // Create an interleaved RGBA tensor [480, 640, 4]
4419        let mut tensor = Tensor::<u8>::new(&[480, 640, 4], None, None).unwrap();
4420
4421        let img_ref = TensorImageRef::from_borrowed_tensor(&mut tensor, RGBA).unwrap();
4422
4423        assert_eq!(img_ref.width(), 640);
4424        assert_eq!(img_ref.height(), 480);
4425        assert_eq!(img_ref.channels(), 4);
4426        assert_eq!(img_ref.fourcc(), RGBA);
4427        assert!(!img_ref.is_planar());
4428    }
4429
4430    #[test]
4431    fn test_tensor_image_ref_invalid_shape() {
4432        // 2D tensor should fail
4433        let mut tensor = Tensor::<u8>::new(&[480, 640], None, None).unwrap();
4434        let result = TensorImageRef::from_borrowed_tensor(&mut tensor, RGB);
4435        assert!(matches!(result, Err(Error::InvalidShape(_))));
4436    }
4437
4438    #[test]
4439    fn test_tensor_image_ref_wrong_channels() {
4440        // RGBA expects 4 channels but tensor has 3
4441        let mut tensor = Tensor::<u8>::new(&[480, 640, 3], None, None).unwrap();
4442        let result = TensorImageRef::from_borrowed_tensor(&mut tensor, RGBA);
4443        assert!(matches!(result, Err(Error::InvalidShape(_))));
4444    }
4445
4446    #[test]
4447    fn test_tensor_image_dst_trait_tensor_image() {
4448        let img = TensorImage::new(640, 480, RGB, None).unwrap();
4449
4450        // Test TensorImageDst trait implementation
4451        fn check_dst<T: TensorImageDst>(dst: &T) {
4452            assert_eq!(dst.width(), 640);
4453            assert_eq!(dst.height(), 480);
4454            assert_eq!(dst.channels(), 3);
4455            assert!(!dst.is_planar());
4456        }
4457
4458        check_dst(&img);
4459    }
4460
4461    #[test]
4462    fn test_tensor_image_dst_trait_tensor_image_ref() {
4463        let mut tensor = Tensor::<u8>::new(&[3, 480, 640], None, None).unwrap();
4464        let img_ref = TensorImageRef::from_borrowed_tensor(&mut tensor, PLANAR_RGB).unwrap();
4465
4466        fn check_dst<T: TensorImageDst>(dst: &T) {
4467            assert_eq!(dst.width(), 640);
4468            assert_eq!(dst.height(), 480);
4469            assert_eq!(dst.channels(), 3);
4470            assert!(dst.is_planar());
4471        }
4472
4473        check_dst(&img_ref);
4474    }
4475
4476    #[test]
4477    fn test_rgb_int8_format() {
4478        let img = TensorImage::new(1280, 720, RGB_INT8, Some(TensorMemory::Mem)).unwrap();
4479        assert_eq!(img.width(), 1280);
4480        assert_eq!(img.height(), 720);
4481        assert_eq!(img.channels(), 3);
4482        assert!(!img.is_planar());
4483        assert_eq!(img.fourcc(), RGB_INT8);
4484    }
4485
4486    #[test]
4487    fn test_planar_rgb_int8_format() {
4488        let img = TensorImage::new(1280, 720, PLANAR_RGB_INT8, Some(TensorMemory::Mem)).unwrap();
4489        assert_eq!(img.width(), 1280);
4490        assert_eq!(img.height(), 720);
4491        assert_eq!(img.channels(), 3);
4492        assert!(img.is_planar());
4493        assert_eq!(img.fourcc(), PLANAR_RGB_INT8);
4494    }
4495
4496    #[test]
4497    fn test_rgb_int8_from_tensor() {
4498        let tensor = Tensor::<u8>::new(&[720, 1280, 3], None, None).unwrap();
4499        let img = TensorImage::from_tensor(tensor, RGB_INT8).unwrap();
4500        assert_eq!(img.width(), 1280);
4501        assert_eq!(img.height(), 720);
4502        assert_eq!(img.channels(), 3);
4503        assert!(!img.is_planar());
4504        assert_eq!(img.fourcc(), RGB_INT8);
4505    }
4506
4507    #[test]
4508    fn test_planar_rgb_int8_from_tensor() {
4509        let tensor = Tensor::<u8>::new(&[3, 720, 1280], None, None).unwrap();
4510        let img = TensorImage::from_tensor(tensor, PLANAR_RGB_INT8).unwrap();
4511        assert_eq!(img.width(), 1280);
4512        assert_eq!(img.height(), 720);
4513        assert_eq!(img.channels(), 3);
4514        assert!(img.is_planar());
4515        assert_eq!(img.fourcc(), PLANAR_RGB_INT8);
4516    }
4517
4518    #[test]
4519    fn test_fourcc_is_int8() {
4520        assert!(fourcc_is_int8(RGB_INT8));
4521        assert!(fourcc_is_int8(PLANAR_RGB_INT8));
4522        assert!(!fourcc_is_int8(RGB));
4523        assert!(!fourcc_is_int8(PLANAR_RGB));
4524        assert!(!fourcc_is_int8(RGBA));
4525    }
4526
4527    #[test]
4528    fn test_fourcc_uint8_equivalent() {
4529        assert_eq!(fourcc_uint8_equivalent(RGB_INT8), RGB);
4530        assert_eq!(fourcc_uint8_equivalent(PLANAR_RGB_INT8), PLANAR_RGB);
4531        assert_eq!(fourcc_uint8_equivalent(RGBA), RGBA);
4532    }
4533
4534    #[test]
4535    fn test_fourcc_is_packed_rgb() {
4536        assert!(fourcc_is_packed_rgb(RGB));
4537        assert!(fourcc_is_packed_rgb(RGB_INT8));
4538        assert!(!fourcc_is_packed_rgb(PLANAR_RGB));
4539        assert!(!fourcc_is_packed_rgb(RGBA));
4540    }
4541
    /// Integration test that exercises the PBO-to-PBO convert path.
    /// Uses ImageProcessor::create_image() to allocate PBO-backed tensors,
    /// then converts between them. Skipped when GL is unavailable or the
    /// backend is not PBO (e.g. DMA-buf systems).
    #[cfg(target_os = "linux")]
    #[cfg(feature = "opengl")]
    #[test]
    fn test_convert_pbo_to_pbo() {
        let mut converter = ImageProcessor::new().unwrap();

        // Skip if GL is not available or backend is not PBO
        // (backend capability must be probed before allocating PBO images).
        let is_pbo = converter
            .opengl
            .as_ref()
            .is_some_and(|gl| gl.transfer_backend() == opengl_headless::TransferBackend::Pbo);
        if !is_pbo {
            eprintln!("Skipping test_convert_pbo_to_pbo: backend is not PBO");
            return;
        }

        // Source is larger than destination so the convert also downscales.
        let src_w = 640;
        let src_h = 480;
        let dst_w = 320;
        let dst_h = 240;

        // Create PBO-backed source image
        let pbo_src = converter.create_image(src_w, src_h, RGBA).unwrap();
        assert_eq!(
            pbo_src.tensor().memory(),
            TensorMemory::Pbo,
            "create_image should produce a PBO tensor"
        );

        // Fill source PBO with test pattern: load JPEG then convert Mem→PBO
        let file = include_bytes!("../../../testdata/zidane.jpg").to_vec();
        let jpeg_src = TensorImage::load_jpeg(&file, Some(RGBA), None).unwrap();

        // Resize JPEG into a Mem temp of the right size, then copy into PBO
        let mut mem_src = TensorImage::new(src_w, src_h, RGBA, Some(TensorMemory::Mem)).unwrap();
        CPUProcessor::new()
            .convert(
                &jpeg_src,
                &mut mem_src,
                Rotation::None,
                Flip::None,
                Crop::no_crop(),
            )
            .unwrap();

        // Copy pixel data into the PBO source by mapping it
        // (the inner scope drops the map guards before the GPU convert below).
        {
            let src_data = mem_src.tensor().map().unwrap();
            let mut pbo_map = pbo_src.tensor().map().unwrap();
            pbo_map.copy_from_slice(&src_data);
        }

        // Create PBO-backed destination image
        let mut pbo_dst = converter.create_image(dst_w, dst_h, RGBA).unwrap();
        assert_eq!(pbo_dst.tensor().memory(), TensorMemory::Pbo);

        // Convert PBO→PBO (this exercises convert_pbo_to_pbo)
        converter
            .convert(
                &pbo_src,
                &mut pbo_dst,
                Rotation::None,
                Flip::None,
                Crop::no_crop(),
            )
            .unwrap();

        // Verify: compare with CPU-only conversion of the same input
        // (the GPU result only needs to be similar, not bit-exact — hence
        // the 0.95 structural-similarity threshold rather than equality).
        let mut cpu_dst = TensorImage::new(dst_w, dst_h, RGBA, Some(TensorMemory::Mem)).unwrap();
        CPUProcessor::new()
            .convert(
                &mem_src,
                &mut cpu_dst,
                Rotation::None,
                Flip::None,
                Crop::no_crop(),
            )
            .unwrap();

        compare_images(&pbo_dst, &cpu_dst, 0.95, function!());
        log::info!("test_convert_pbo_to_pbo: PASS — PBO-to-PBO convert matches CPU reference");
    }
4628}