Skip to main content

edgefirst_image/
cpu.rs

1// SPDX-FileCopyrightText: Copyright 2025 Au-Zone Technologies
2// SPDX-License-Identifier: Apache-2.0
3
4use crate::{
5    Crop, Error, Flip, FunctionTimer, ImageProcessorTrait, Rect, Result, Rotation, TensorImage,
6    TensorImageDst, TensorImageRef, GREY, NV12, NV16, PLANAR_RGB, PLANAR_RGBA, RGB, RGBA, YUYV,
7};
8#[cfg(feature = "decoder")]
9use edgefirst_decoder::{DetectBox, Segmentation};
10use edgefirst_tensor::{TensorMapTrait, TensorTrait};
11use four_char_code::FourCharCode;
12use ndarray::{ArrayView3, ArrayViewMut3, Axis};
13use rayon::iter::{
14    IndexedParallelIterator, IntoParallelRefIterator, IntoParallelRefMutIterator, ParallelIterator,
15};
16use std::ops::Shr;
17
/// CPUConverter implements the ImageProcessor trait using the fallback CPU
/// implementation for image processing.
#[derive(Debug, Clone)]
pub struct CPUProcessor {
    // Resizer from `fast_image_resize`; presumably holds reusable scratch
    // state — see the crate docs.
    resizer: fast_image_resize::Resizer,
    // Resize algorithm/alpha options chosen at construction
    // (bilinear convolution or nearest neighbor).
    options: fast_image_resize::ResizeOptions,
    // RGBA color table, initialized from crate::DEFAULT_COLORS_U8; only
    // present (and presumably used for drawing) with the decoder feature.
    #[cfg(feature = "decoder")]
    colors: [[u8; 4]; 20],
}
27
// SAFETY(review): these impls assert that CPUProcessor can be moved to and
// shared between threads. That is sound only if
// `fast_image_resize::Resizer` / `ResizeOptions` contain no thread-affine
// state — TODO confirm against the fast_image_resize documentation.
unsafe impl Send for CPUProcessor {}
unsafe impl Sync for CPUProcessor {}
30
/// Expands a limited-range (TV, 16..=240) sample to full range (PC,
/// 0..=255), rounding to nearest.
///
/// Out-of-range inputs are clamped: values below 16 map to 0 and values
/// above 240 map to 255. The previous expression underflowed `l as u16 - 16`
/// for inputs below 16 (panic in debug builds, garbage in release) and let
/// results above 255 wrap through the `as u8` cast (e.g. 255 became 16).
#[inline(always)]
fn limit_to_full(l: u8) -> u8 {
    const RANGE: u16 = 240 - 16; // limited-range span, shared with full_to_limit
    let l = (l as u16).saturating_sub(16);
    // Max intermediate: 239 * 255 + 112 = 61057, fits in u16.
    ((l * 255 + RANGE / 2) / RANGE).min(255) as u8
}
35
/// Compresses a full-range (PC, 0..=255) sample to limited range (TV,
/// 16..=240), rounding to nearest. 0 maps to 16 and 255 maps to 240.
#[inline(always)]
fn full_to_limit(l: u8) -> u8 {
    const RANGE: u16 = 240 - 16; // limited-range span
    // Max intermediate: 255 * 224 + 127 = 57247, fits in u16.
    let scaled = (l as u16 * RANGE + 255 / 2) / 255;
    (scaled + 16) as u8
}
40
41impl Default for CPUProcessor {
42    fn default() -> Self {
43        Self::new_bilinear()
44    }
45}
46
47impl CPUProcessor {
48    /// Creates a new CPUConverter with bilinear resizing.
49    pub fn new() -> Self {
50        Self::new_bilinear()
51    }
52
53    /// Creates a new CPUConverter with bilinear resizing.
54    fn new_bilinear() -> Self {
55        let resizer = fast_image_resize::Resizer::new();
56        let options = fast_image_resize::ResizeOptions::new()
57            .resize_alg(fast_image_resize::ResizeAlg::Convolution(
58                fast_image_resize::FilterType::Bilinear,
59            ))
60            .use_alpha(false);
61
62        log::debug!("CPUConverter created");
63        Self {
64            resizer,
65            options,
66            #[cfg(feature = "decoder")]
67            colors: crate::DEFAULT_COLORS_U8,
68        }
69    }
70
71    /// Creates a new CPUConverter with nearest neighbor resizing.
72    pub fn new_nearest() -> Self {
73        let resizer = fast_image_resize::Resizer::new();
74        let options = fast_image_resize::ResizeOptions::new()
75            .resize_alg(fast_image_resize::ResizeAlg::Nearest)
76            .use_alpha(false);
77        log::debug!("CPUConverter created");
78        Self {
79            resizer,
80            options,
81            #[cfg(feature = "decoder")]
82            colors: crate::DEFAULT_COLORS_U8,
83        }
84    }
85
    /// Applies `flip` followed by `rotation` to the packed pixels in
    /// `src_map`, writing the result into `dst_map`. `dst` supplies only the
    /// output geometry (height, width, channels); for 90° rotations the
    /// source buffer is interpreted with the transposed shape.
    ///
    /// The transform is expressed purely as zero-copy ndarray view
    /// manipulations (axis inversion / axis swap); the single `assign` at
    /// the end performs the actual data movement.
    pub(crate) fn flip_rotate_ndarray(
        src_map: &[u8],
        dst_map: &mut [u8],
        dst: &TensorImage,
        rotation: Rotation,
        flip: Flip,
    ) -> Result<(), crate::Error> {
        let mut dst_view =
            ArrayViewMut3::from_shape((dst.height(), dst.width(), dst.channels()), dst_map)?;
        // The source shares the destination's shape unless rotated by 90°,
        // in which case its height and width are swapped.
        let mut src_view = match rotation {
            Rotation::None | Rotation::Rotate180 => {
                ArrayView3::from_shape((dst.height(), dst.width(), dst.channels()), src_map)?
            }
            Rotation::Clockwise90 | Rotation::CounterClockwise90 => {
                ArrayView3::from_shape((dst.width(), dst.height(), dst.channels()), src_map)?
            }
        };

        // The flip is applied to the source view first, before any rotation.
        match flip {
            Flip::None => {}
            Flip::Vertical => {
                src_view.invert_axis(Axis(0));
            }
            Flip::Horizontal => {
                src_view.invert_axis(Axis(1));
            }
        }

        match rotation {
            Rotation::None => {}
            // Clockwise 90°: transpose, then reverse the columns.
            Rotation::Clockwise90 => {
                src_view.swap_axes(0, 1);
                src_view.invert_axis(Axis(1));
            }
            // 180°: reverse both rows and columns.
            Rotation::Rotate180 => {
                src_view.invert_axis(Axis(0));
                src_view.invert_axis(Axis(1));
            }
            // Counter-clockwise 90°: transpose, then reverse the rows.
            Rotation::CounterClockwise90 => {
                src_view.swap_axes(0, 1);
                src_view.invert_axis(Axis(0));
            }
        }

        // Materialize the (possibly strided) source view into the packed
        // destination buffer.
        dst_view.assign(&src_view);

        Ok(())
    }
134
    /// Converts NV12 (4:2:0 semi-planar, limited-range Bt.709) to packed RGB.
    ///
    /// NOTE(review): the Y and UV strides are taken as `src.width()`, i.e.
    /// the source planes are assumed tightly packed; `src.row_stride()` is
    /// not consulted — confirm for buffers with padded rows.
    fn convert_nv12_to_rgb(src: &TensorImage, dst: &mut TensorImage) -> Result<()> {
        assert_eq!(src.fourcc(), NV12);
        assert_eq!(dst.fourcc(), RGB);
        let map = src.tensor.map()?;
        let y_stride = src.width() as u32;
        let uv_stride = src.width() as u32;
        // Y plane occupies the first height * stride bytes; interleaved UV
        // follows.
        let slices = map.as_slice().split_at(y_stride as usize * src.height());

        let src = yuv::YuvBiPlanarImage {
            y_plane: slices.0,
            y_stride,
            uv_plane: slices.1,
            uv_stride,
            width: src.width() as u32,
            height: src.height() as u32,
        };

        Ok(yuv::yuv_nv12_to_rgb(
            &src,
            dst.tensor.map()?.as_mut_slice(),
            dst.row_stride() as u32,
            yuv::YuvRange::Limited,
            yuv::YuvStandardMatrix::Bt709,
            yuv::YuvConversionMode::Balanced,
        )?)
    }
161
    /// Converts NV12 (4:2:0 semi-planar, limited-range Bt.709) to packed
    /// RGBA.
    ///
    /// NOTE(review): as in `convert_nv12_to_rgb`, the source planes are
    /// assumed tightly packed (stride == width) — confirm for padded rows.
    fn convert_nv12_to_rgba(src: &TensorImage, dst: &mut TensorImage) -> Result<()> {
        assert_eq!(src.fourcc(), NV12);
        assert_eq!(dst.fourcc(), RGBA);
        let map = src.tensor.map()?;
        let y_stride = src.width() as u32;
        let uv_stride = src.width() as u32;
        // Y plane occupies the first height * stride bytes; interleaved UV
        // follows.
        let slices = map.as_slice().split_at(y_stride as usize * src.height());

        let src = yuv::YuvBiPlanarImage {
            y_plane: slices.0,
            y_stride,
            uv_plane: slices.1,
            uv_stride,
            width: src.width() as u32,
            height: src.height() as u32,
        };

        Ok(yuv::yuv_nv12_to_rgba(
            &src,
            dst.tensor.map()?.as_mut_slice(),
            dst.row_stride() as u32,
            yuv::YuvRange::Limited,
            yuv::YuvStandardMatrix::Bt709,
            yuv::YuvConversionMode::Balanced,
        )?)
    }
188
    /// Converts NV12 to 8-bit greyscale by taking the Y plane and expanding
    /// limited-range luma to full range; the UV plane is ignored.
    fn convert_nv12_to_grey(src: &TensorImage, dst: &mut TensorImage) -> Result<()> {
        assert_eq!(src.fourcc(), NV12);
        assert_eq!(dst.fourcc(), GREY);
        let src_map = src.tensor.map()?;
        let mut dst_map = dst.tensor.map()?;
        let y_stride = src.width() as u32;
        // Only the leading Y plane (height * stride bytes) is consumed.
        let y_slice = src_map
            .as_slice()
            .split_at(y_stride as usize * src.height())
            .0;
        // Process in fixed chunks of 8 bytes, presumably to encourage
        // autovectorization of the range expansion; the tail is handled
        // element-wise below.
        let src_chunks = y_slice.as_chunks::<8>();
        let dst_chunks = dst_map.as_chunks_mut::<8>();
        for (s, d) in src_chunks.0.iter().zip(dst_chunks.0) {
            s.iter().zip(d).for_each(|(s, d)| *d = limit_to_full(*s));
        }

        // Remainder bytes that did not fill a whole chunk of 8.
        for (s, d) in src_chunks.1.iter().zip(dst_chunks.1) {
            *d = limit_to_full(*s);
        }

        Ok(())
    }
211
212    fn convert_yuyv_to_rgb(src: &TensorImage, dst: &mut TensorImage) -> Result<()> {
213        assert_eq!(src.fourcc(), YUYV);
214        assert_eq!(dst.fourcc(), RGB);
215        let src = yuv::YuvPackedImage::<u8> {
216            yuy: &src.tensor.map()?,
217            yuy_stride: src.row_stride() as u32, // we assume packed yuyv
218            width: src.width() as u32,
219            height: src.height() as u32,
220        };
221
222        Ok(yuv::yuyv422_to_rgb(
223            &src,
224            dst.tensor.map()?.as_mut_slice(),
225            dst.width() as u32 * 3,
226            yuv::YuvRange::Limited,
227            yuv::YuvStandardMatrix::Bt709,
228        )?)
229    }
230
    /// Converts packed YUYV (4:2:2, limited-range Bt.709) to packed RGBA.
    fn convert_yuyv_to_rgba(src: &TensorImage, dst: &mut TensorImage) -> Result<()> {
        assert_eq!(src.fourcc(), YUYV);
        assert_eq!(dst.fourcc(), RGBA);
        let src = yuv::YuvPackedImage::<u8> {
            yuy: &src.tensor.map()?,
            yuy_stride: src.row_stride() as u32, // we assume packed yuyv
            width: src.width() as u32,
            height: src.height() as u32,
        };

        Ok(yuv::yuyv422_to_rgba(
            &src,
            dst.tensor.map()?.as_mut_slice(),
            dst.row_stride() as u32,
            yuv::YuvRange::Limited,
            yuv::YuvStandardMatrix::Bt709,
        )?)
    }
249
250    fn convert_yuyv_to_8bps(src: &TensorImage, dst: &mut TensorImage) -> Result<()> {
251        assert_eq!(src.fourcc(), YUYV);
252        assert_eq!(dst.fourcc(), PLANAR_RGB);
253        let mut tmp = TensorImage::new(src.width(), src.height(), RGB, None)?;
254        Self::convert_yuyv_to_rgb(src, &mut tmp)?;
255        Self::convert_rgb_to_8bps(&tmp, dst)
256    }
257
258    fn convert_yuyv_to_prgba(src: &TensorImage, dst: &mut TensorImage) -> Result<()> {
259        assert_eq!(src.fourcc(), YUYV);
260        assert_eq!(dst.fourcc(), PLANAR_RGBA);
261        let mut tmp = TensorImage::new(src.width(), src.height(), RGB, None)?;
262        Self::convert_yuyv_to_rgb(src, &mut tmp)?;
263        Self::convert_rgb_to_prgba(&tmp, dst)
264    }
265
    /// Converts packed YUYV to greyscale: the luma bytes (every second byte)
    /// are extracted and expanded from limited to full range; chroma bytes
    /// are discarded.
    fn convert_yuyv_to_grey(src: &TensorImage, dst: &mut TensorImage) -> Result<()> {
        assert_eq!(src.fourcc(), YUYV);
        assert_eq!(dst.fourcc(), GREY);
        let src_map = src.tensor.map()?;
        let mut dst_map = dst.tensor.map()?;
        // 16 YUYV bytes carry 8 luma samples, so each 16-byte source chunk
        // maps onto an 8-byte destination chunk.
        let src_chunks = src_map.as_chunks::<16>();
        let dst_chunks = dst_map.as_chunks_mut::<8>();
        for (s, d) in src_chunks.0.iter().zip(dst_chunks.0) {
            s.iter()
                .step_by(2) // luma sits at even offsets: Y U Y V ...
                .zip(d)
                .for_each(|(s, d)| *d = limit_to_full(*s));
        }

        // Tail bytes that did not fill complete chunks.
        for (s, d) in src_chunks.1.iter().step_by(2).zip(dst_chunks.1) {
            *d = limit_to_full(*s);
        }

        Ok(())
    }
286
287    fn convert_yuyv_to_nv16(src: &TensorImage, dst: &mut TensorImage) -> Result<()> {
288        assert_eq!(src.fourcc(), YUYV);
289        assert_eq!(dst.fourcc(), NV16);
290        let src_map = src.tensor.map()?;
291        let mut dst_map = dst.tensor.map()?;
292
293        let src_chunks = src_map.as_chunks::<2>().0;
294        let (y_plane, uv_plane) = dst_map.split_at_mut(dst.row_stride() * dst.height());
295
296        for ((s, y), uv) in src_chunks.iter().zip(y_plane).zip(uv_plane) {
297            *y = s[0];
298            *uv = s[1];
299        }
300        Ok(())
301    }
302
    /// Converts full-range greyscale to packed RGB via `yuv::yuv400_to_rgb`
    /// (Bt.709, full range).
    fn convert_grey_to_rgb(src: &TensorImage, dst: &mut TensorImage) -> Result<()> {
        assert_eq!(src.fourcc(), GREY);
        assert_eq!(dst.fourcc(), RGB);
        let src = yuv::YuvGrayImage::<u8> {
            y_plane: &src.tensor.map()?,
            y_stride: src.row_stride() as u32, // we assume packed Y
            width: src.width() as u32,
            height: src.height() as u32,
        };
        Ok(yuv::yuv400_to_rgb(
            &src,
            dst.tensor.map()?.as_mut_slice(),
            dst.row_stride() as u32,
            yuv::YuvRange::Full,
            yuv::YuvStandardMatrix::Bt709,
        )?)
    }
320
    /// Converts full-range greyscale to packed RGBA via
    /// `yuv::yuv400_to_rgba` (Bt.709, full range).
    fn convert_grey_to_rgba(src: &TensorImage, dst: &mut TensorImage) -> Result<()> {
        assert_eq!(src.fourcc(), GREY);
        assert_eq!(dst.fourcc(), RGBA);
        let src = yuv::YuvGrayImage::<u8> {
            y_plane: &src.tensor.map()?,
            y_stride: src.row_stride() as u32, // we assume packed Y
            width: src.width() as u32,
            height: src.height() as u32,
        };
        Ok(yuv::yuv400_to_rgba(
            &src,
            dst.tensor.map()?.as_mut_slice(),
            dst.row_stride() as u32,
            yuv::YuvRange::Full,
            yuv::YuvStandardMatrix::Bt709,
        )?)
    }
338
339    fn convert_grey_to_8bps(src: &TensorImage, dst: &mut TensorImage) -> Result<()> {
340        assert_eq!(src.fourcc(), GREY);
341        assert_eq!(dst.fourcc(), PLANAR_RGB);
342
343        let src = src.tensor().map()?;
344        let src = src.as_slice();
345
346        let mut dst_map = dst.tensor().map()?;
347        let dst_ = dst_map.as_mut_slice();
348
349        let (dst0, dst1) = dst_.split_at_mut(dst.width() * dst.height());
350        let (dst1, dst2) = dst1.split_at_mut(dst.width() * dst.height());
351
352        rayon::scope(|s| {
353            s.spawn(|_| dst0.copy_from_slice(src));
354            s.spawn(|_| dst1.copy_from_slice(src));
355            s.spawn(|_| dst2.copy_from_slice(src));
356        });
357        Ok(())
358    }
359
360    fn convert_grey_to_prgba(src: &TensorImage, dst: &mut TensorImage) -> Result<()> {
361        assert_eq!(src.fourcc(), GREY);
362        assert_eq!(dst.fourcc(), PLANAR_RGBA);
363
364        let src = src.tensor().map()?;
365        let src = src.as_slice();
366
367        let mut dst_map = dst.tensor().map()?;
368        let dst_ = dst_map.as_mut_slice();
369
370        let (dst0, dst1) = dst_.split_at_mut(dst.width() * dst.height());
371        let (dst1, dst2) = dst1.split_at_mut(dst.width() * dst.height());
372        let (dst2, dst3) = dst2.split_at_mut(dst.width() * dst.height());
373        rayon::scope(|s| {
374            s.spawn(|_| dst0.copy_from_slice(src));
375            s.spawn(|_| dst1.copy_from_slice(src));
376            s.spawn(|_| dst2.copy_from_slice(src));
377            s.spawn(|_| dst3.fill(255));
378        });
379        Ok(())
380    }
381
382    fn convert_grey_to_yuyv(src: &TensorImage, dst: &mut TensorImage) -> Result<()> {
383        assert_eq!(src.fourcc(), GREY);
384        assert_eq!(dst.fourcc(), YUYV);
385
386        let src = src.tensor().map()?;
387        let src = src.as_slice();
388
389        let mut dst = dst.tensor().map()?;
390        let dst = dst.as_mut_slice();
391        for (s, d) in src
392            .as_chunks::<2>()
393            .0
394            .iter()
395            .zip(dst.as_chunks_mut::<4>().0.iter_mut())
396        {
397            d[0] = full_to_limit(s[0]);
398            d[1] = 128;
399
400            d[2] = full_to_limit(s[1]);
401            d[3] = 128;
402        }
403        Ok(())
404    }
405
406    fn convert_grey_to_nv16(src: &TensorImage, dst: &mut TensorImage) -> Result<()> {
407        assert_eq!(src.fourcc(), GREY);
408        assert_eq!(dst.fourcc(), NV16);
409
410        let src = src.tensor().map()?;
411        let src = src.as_slice();
412
413        let mut dst = dst.tensor().map()?;
414        let dst = dst.as_mut_slice();
415
416        for (s, d) in src.iter().zip(dst[0..src.len()].iter_mut()) {
417            *d = full_to_limit(*s);
418        }
419        dst[src.len()..].fill(128);
420
421        Ok(())
422    }
423
424    fn convert_rgba_to_rgb(src: &TensorImage, dst: &mut TensorImage) -> Result<()> {
425        assert_eq!(src.fourcc(), RGBA);
426        assert_eq!(dst.fourcc(), RGB);
427
428        Ok(yuv::rgba_to_rgb(
429            src.tensor.map()?.as_slice(),
430            (src.width() * src.channels()) as u32,
431            dst.tensor.map()?.as_mut_slice(),
432            (dst.width() * dst.channels()) as u32,
433            src.width() as u32,
434            src.height() as u32,
435        )?)
436    }
437
    /// Converts packed RGBA to 8-bit full-range greyscale (Bt.709 luma);
    /// alpha is ignored.
    fn convert_rgba_to_grey(src: &TensorImage, dst: &mut TensorImage) -> Result<()> {
        assert_eq!(src.fourcc(), RGBA);
        assert_eq!(dst.fourcc(), GREY);

        let mut dst = yuv::YuvGrayImageMut::<u8> {
            // The mapped destination tensor is borrowed directly as the
            // output Y plane.
            y_plane: yuv::BufferStoreMut::Borrowed(&mut dst.tensor.map()?),
            y_stride: dst.row_stride() as u32,
            width: dst.width() as u32,
            height: dst.height() as u32,
        };
        Ok(yuv::rgba_to_yuv400(
            &mut dst,
            src.tensor.map()?.as_slice(),
            src.row_stride() as u32,
            yuv::YuvRange::Full,
            yuv::YuvStandardMatrix::Bt709,
        )?)
    }
456
457    fn convert_rgba_to_8bps(src: &TensorImage, dst: &mut TensorImage) -> Result<()> {
458        assert_eq!(src.fourcc(), RGBA);
459        assert_eq!(dst.fourcc(), PLANAR_RGB);
460
461        let src = src.tensor().map()?;
462        let src = src.as_slice();
463        let src = src.as_chunks::<4>().0;
464
465        let mut dst_map = dst.tensor().map()?;
466        let dst_ = dst_map.as_mut_slice();
467
468        let (dst0, dst1) = dst_.split_at_mut(dst.width() * dst.height());
469        let (dst1, dst2) = dst1.split_at_mut(dst.width() * dst.height());
470
471        src.par_iter()
472            .zip_eq(dst0)
473            .zip_eq(dst1)
474            .zip_eq(dst2)
475            .for_each(|(((s, d0), d1), d2)| {
476                *d0 = s[0];
477                *d1 = s[1];
478                *d2 = s[2];
479            });
480        Ok(())
481    }
482
483    fn convert_rgba_to_prgba(src: &TensorImage, dst: &mut TensorImage) -> Result<()> {
484        assert_eq!(src.fourcc(), RGBA);
485        assert_eq!(dst.fourcc(), PLANAR_RGBA);
486
487        let src = src.tensor().map()?;
488        let src = src.as_slice();
489        let src = src.as_chunks::<4>().0;
490
491        let mut dst_map = dst.tensor().map()?;
492        let dst_ = dst_map.as_mut_slice();
493
494        let (dst0, dst1) = dst_.split_at_mut(dst.width() * dst.height());
495        let (dst1, dst2) = dst1.split_at_mut(dst.width() * dst.height());
496        let (dst2, dst3) = dst2.split_at_mut(dst.width() * dst.height());
497
498        src.par_iter()
499            .zip_eq(dst0)
500            .zip_eq(dst1)
501            .zip_eq(dst2)
502            .zip_eq(dst3)
503            .for_each(|((((s, d0), d1), d2), d3)| {
504                *d0 = s[0];
505                *d1 = s[1];
506                *d2 = s[2];
507                *d3 = s[3];
508            });
509        Ok(())
510    }
511
    /// Converts packed RGBA to packed YUYV (4:2:2, limited-range Bt.709)
    /// using integer fixed-point arithmetic; the alpha channel is ignored.
    ///
    /// Each pair of horizontally adjacent pixels yields two luma samples and
    /// one (U, V) pair averaged over both pixels' chroma contributions.
    fn convert_rgba_to_yuyv(src: &TensorImage, dst: &mut TensorImage) -> Result<()> {
        assert_eq!(src.fourcc(), RGBA);
        assert_eq!(dst.fourcc(), YUYV);

        let src = src.tensor().map()?;
        let src = src.as_slice();

        let mut dst = dst.tensor().map()?;
        let dst = dst.as_mut_slice();

        // compute quantized Bt.709 limited range RGB to YUV matrix
        const KR: f64 = 0.2126f64; // Bt.709 red luma weight
        const KB: f64 = 0.0722f64; // Bt.709 blue luma weight
        const KG: f64 = 1.0 - KR - KB;
        const BIAS: i32 = 20; // fixed-point fractional bits

        // Luma row, scaled by 219/255 (limited-range luma span).
        const Y_R: i32 = (KR * (219 << BIAS) as f64 / 255.0).round() as i32;
        const Y_G: i32 = (KG * (219 << BIAS) as f64 / 255.0).round() as i32;
        const Y_B: i32 = (KB * (219 << BIAS) as f64 / 255.0).round() as i32;

        // Chroma rows, scaled by 224/255 (limited-range chroma span).
        // Note KR + KG == 1 - KB, so the U row is (B - Y) / (2 (1 - KB)).
        const U_R: i32 = (-KR / (KR + KG) / 2.0 * (224 << BIAS) as f64 / 255.0).round() as i32;
        const U_G: i32 = (-KG / (KR + KG) / 2.0 * (224 << BIAS) as f64 / 255.0).round() as i32;
        const U_B: i32 = (0.5_f64 * (224 << BIAS) as f64 / 255.0).ceil() as i32;

        // Likewise KG + KB == 1 - KR: the V row is (R - Y) / (2 (1 - KR)).
        const V_R: i32 = (0.5_f64 * (224 << BIAS) as f64 / 255.0).ceil() as i32;
        const V_G: i32 = (-KG / (KG + KB) / 2.0 * (224 << BIAS) as f64 / 255.0).round() as i32;
        const V_B: i32 = (-KB / (KG + KB) / 2.0 * (224 << BIAS) as f64 / 255.0).round() as i32;
        // Nearest-rounding terms for the single-sample (luma) and averaged
        // two-sample (chroma, extra shift) sums.
        const ROUND: i32 = 1 << (BIAS - 1);
        const ROUND2: i32 = 1 << BIAS;
        // Converts two RGBA pixels (8 bytes) into one YUYV macropixel
        // (4 bytes, order Y0 U Y1 V) with +16/+128 limited-range offsets.
        let process_rgba_to_yuyv = |s: &[u8; 8], d: &mut [u8; 4]| {
            let [r0, g0, b0, _, r1, g1, b1, _] = *s;
            let r0 = r0 as i32;
            let g0 = g0 as i32;
            let b0 = b0 as i32;
            let r1 = r1 as i32;
            let g1 = g1 as i32;
            let b1 = b1 as i32;
            d[0] = ((Y_R * r0 + Y_G * g0 + Y_B * b0 + ROUND).shr(BIAS) + 16) as u8;
            d[1] = ((U_R * r0 + U_G * g0 + U_B * b0 + U_R * r1 + U_G * g1 + U_B * b1 + ROUND2)
                .shr(BIAS + 1)
                + 128) as u8;
            d[2] = ((Y_R * r1 + Y_G * g1 + Y_B * b1 + ROUND).shr(BIAS) + 16) as u8;
            d[3] = ((V_R * r0 + V_G * g0 + V_B * b0 + V_R * r1 + V_G * g1 + V_B * b1 + ROUND2)
                .shr(BIAS + 1)
                + 128) as u8;
        };

        // Process 32 macropixels per outer iteration, presumably to help the
        // compiler vectorize; the tail is handled pairwise below.
        let src = src.as_chunks::<{ 8 * 32 }>();
        let dst = dst.as_chunks_mut::<{ 4 * 32 }>();

        for (s, d) in src.0.iter().zip(dst.0.iter_mut()) {
            let s = s.as_chunks::<8>().0;
            let d = d.as_chunks_mut::<4>().0;
            for (s, d) in s.iter().zip(d.iter_mut()) {
                process_rgba_to_yuyv(s, d);
            }
        }

        // Remaining pixels that did not fill a 32-macropixel chunk.
        let s = src.1.as_chunks::<8>().0;
        let d = dst.1.as_chunks_mut::<4>().0;
        for (s, d) in s.iter().zip(d.iter_mut()) {
            process_rgba_to_yuyv(s, d);
        }

        Ok(())
    }
578
    /// Converts packed RGBA to NV16 (4:2:2 semi-planar, limited-range
    /// Bt.709); alpha is ignored.
    ///
    /// NOTE(review): destination planes are assumed tightly packed
    /// (stride == width) — confirm for padded buffers.
    fn convert_rgba_to_nv16(src: &TensorImage, dst: &mut TensorImage) -> Result<()> {
        assert_eq!(src.fourcc(), RGBA);
        assert_eq!(dst.fourcc(), NV16);

        let mut dst_map = dst.tensor().map()?;

        // NV16 layout: full-size Y plane followed by an interleaved UV plane
        // of the same byte count (4:2:2 keeps full vertical chroma).
        let (y_plane, uv_plane) = dst_map.split_at_mut(dst.width() * dst.height());
        let mut bi_planar_image = yuv::YuvBiPlanarImageMut::<u8> {
            y_plane: yuv::BufferStoreMut::Borrowed(y_plane),
            y_stride: dst.width() as u32,
            uv_plane: yuv::BufferStoreMut::Borrowed(uv_plane),
            uv_stride: dst.width() as u32,
            width: dst.width() as u32,
            height: dst.height() as u32,
        };

        Ok(yuv::rgba_to_yuv_nv16(
            &mut bi_planar_image,
            src.tensor.map()?.as_slice(),
            src.row_stride() as u32,
            yuv::YuvRange::Limited,
            yuv::YuvStandardMatrix::Bt709,
            yuv::YuvConversionMode::Balanced,
        )?)
    }
604
605    fn convert_rgb_to_rgba(src: &TensorImage, dst: &mut TensorImage) -> Result<()> {
606        assert_eq!(src.fourcc(), RGB);
607        assert_eq!(dst.fourcc(), RGBA);
608
609        Ok(yuv::rgb_to_rgba(
610            src.tensor.map()?.as_slice(),
611            (src.width() * src.channels()) as u32,
612            dst.tensor.map()?.as_mut_slice(),
613            (dst.width() * dst.channels()) as u32,
614            src.width() as u32,
615            src.height() as u32,
616        )?)
617    }
618
    /// Converts packed RGB to 8-bit full-range greyscale (Bt.709 luma).
    fn convert_rgb_to_grey(src: &TensorImage, dst: &mut TensorImage) -> Result<()> {
        assert_eq!(src.fourcc(), RGB);
        assert_eq!(dst.fourcc(), GREY);

        let mut dst = yuv::YuvGrayImageMut::<u8> {
            // The mapped destination tensor is borrowed directly as the
            // output Y plane.
            y_plane: yuv::BufferStoreMut::Borrowed(&mut dst.tensor.map()?),
            y_stride: dst.row_stride() as u32,
            width: dst.width() as u32,
            height: dst.height() as u32,
        };
        Ok(yuv::rgb_to_yuv400(
            &mut dst,
            src.tensor.map()?.as_slice(),
            src.row_stride() as u32,
            yuv::YuvRange::Full,
            yuv::YuvStandardMatrix::Bt709,
        )?)
    }
637
638    fn convert_rgb_to_8bps(src: &TensorImage, dst: &mut TensorImage) -> Result<()> {
639        assert_eq!(src.fourcc(), RGB);
640        assert_eq!(dst.fourcc(), PLANAR_RGB);
641
642        let src = src.tensor().map()?;
643        let src = src.as_slice();
644        let src = src.as_chunks::<3>().0;
645
646        let mut dst_map = dst.tensor().map()?;
647        let dst_ = dst_map.as_mut_slice();
648
649        let (dst0, dst1) = dst_.split_at_mut(dst.width() * dst.height());
650        let (dst1, dst2) = dst1.split_at_mut(dst.width() * dst.height());
651
652        src.par_iter()
653            .zip_eq(dst0)
654            .zip_eq(dst1)
655            .zip_eq(dst2)
656            .for_each(|(((s, d0), d1), d2)| {
657                *d0 = s[0];
658                *d1 = s[1];
659                *d2 = s[2];
660            });
661        Ok(())
662    }
663
    /// Converts packed RGB to planar RGBA: the R, G and B planes are
    /// de-interleaved in parallel while the alpha plane is filled with
    /// opaque (255).
    fn convert_rgb_to_prgba(src: &TensorImage, dst: &mut TensorImage) -> Result<()> {
        assert_eq!(src.fourcc(), RGB);
        assert_eq!(dst.fourcc(), PLANAR_RGBA);

        let src = src.tensor().map()?;
        let src = src.as_slice();
        let src = src.as_chunks::<3>().0;

        let mut dst_map = dst.tensor().map()?;
        let dst_ = dst_map.as_mut_slice();

        // Split the destination into four width*height planes: R, G, B, A.
        let (dst0, dst1) = dst_.split_at_mut(dst.width() * dst.height());
        let (dst1, dst2) = dst1.split_at_mut(dst.width() * dst.height());
        let (dst2, dst3) = dst2.split_at_mut(dst.width() * dst.height());

        // The RGB scatter and the alpha fill touch disjoint planes, so they
        // run as independent tasks; the scatter itself is also a parallel
        // iterator. `zip_eq` panics on mismatched plane/pixel counts.
        rayon::scope(|s| {
            s.spawn(|_| {
                src.par_iter()
                    .zip_eq(dst0)
                    .zip_eq(dst1)
                    .zip_eq(dst2)
                    .for_each(|(((s, d0), d1), d2)| {
                        *d0 = s[0];
                        *d1 = s[1];
                        *d2 = s[2];
                    })
            });
            s.spawn(|_| dst3.fill(255));
        });
        Ok(())
    }
695
    /// Converts packed RGB to packed YUYV (4:2:2, limited-range Bt.709)
    /// using integer fixed-point arithmetic.
    ///
    /// Each pair of horizontally adjacent pixels yields two luma samples and
    /// one (U, V) pair averaged over both pixels' chroma contributions.
    fn convert_rgb_to_yuyv(src: &TensorImage, dst: &mut TensorImage) -> Result<()> {
        assert_eq!(src.fourcc(), RGB);
        assert_eq!(dst.fourcc(), YUYV);

        let src = src.tensor().map()?;
        let src = src.as_slice();

        let mut dst = dst.tensor().map()?;
        let dst = dst.as_mut_slice();

        // compute quantized Bt.709 limited range RGB to YUV matrix
        const BIAS: i32 = 20; // fixed-point fractional bits
        const KR: f64 = 0.2126f64; // Bt.709 red luma weight
        const KB: f64 = 0.0722f64; // Bt.709 blue luma weight
        const KG: f64 = 1.0 - KR - KB;
        // Luma row, scaled by 219/255 (limited-range luma span).
        const Y_R: i32 = (KR * (219 << BIAS) as f64 / 255.0).round() as i32;
        const Y_G: i32 = (KG * (219 << BIAS) as f64 / 255.0).round() as i32;
        const Y_B: i32 = (KB * (219 << BIAS) as f64 / 255.0).round() as i32;

        // Chroma rows, scaled by 224/255 (limited-range chroma span).
        // Note KR + KG == 1 - KB, so the U row is (B - Y) / (2 (1 - KB)).
        const U_R: i32 = (-KR / (KR + KG) / 2.0 * (224 << BIAS) as f64 / 255.0).round() as i32;
        const U_G: i32 = (-KG / (KR + KG) / 2.0 * (224 << BIAS) as f64 / 255.0).round() as i32;
        const U_B: i32 = (0.5_f64 * (224 << BIAS) as f64 / 255.0).ceil() as i32;

        // Likewise KG + KB == 1 - KR: the V row is (R - Y) / (2 (1 - KR)).
        const V_R: i32 = (0.5_f64 * (224 << BIAS) as f64 / 255.0).ceil() as i32;
        const V_G: i32 = (-KG / (KG + KB) / 2.0 * (224 << BIAS) as f64 / 255.0).round() as i32;
        const V_B: i32 = (-KB / (KG + KB) / 2.0 * (224 << BIAS) as f64 / 255.0).round() as i32;
        // Nearest-rounding terms for the single-sample (luma) and averaged
        // two-sample (chroma, extra shift) sums.
        const ROUND: i32 = 1 << (BIAS - 1);
        const ROUND2: i32 = 1 << BIAS;
        // Converts two RGB pixels (6 bytes) into one YUYV macropixel
        // (4 bytes, order Y0 U Y1 V) with +16/+128 limited-range offsets.
        let process_rgb_to_yuyv = |s: &[u8; 6], d: &mut [u8; 4]| {
            let [r0, g0, b0, r1, g1, b1] = *s;
            let r0 = r0 as i32;
            let g0 = g0 as i32;
            let b0 = b0 as i32;
            let r1 = r1 as i32;
            let g1 = g1 as i32;
            let b1 = b1 as i32;
            d[0] = ((Y_R * r0 + Y_G * g0 + Y_B * b0 + ROUND).shr(BIAS) + 16) as u8;
            d[1] = ((U_R * r0 + U_G * g0 + U_B * b0 + U_R * r1 + U_G * g1 + U_B * b1 + ROUND2)
                .shr(BIAS + 1)
                + 128) as u8;
            d[2] = ((Y_R * r1 + Y_G * g1 + Y_B * b1 + ROUND).shr(BIAS) + 16) as u8;
            d[3] = ((V_R * r0 + V_G * g0 + V_B * b0 + V_R * r1 + V_G * g1 + V_B * b1 + ROUND2)
                .shr(BIAS + 1)
                + 128) as u8;
        };

        // Process 32 macropixels per outer iteration, presumably to help the
        // compiler vectorize; the tail is handled pairwise below.
        let src = src.as_chunks::<{ 6 * 32 }>();
        let dst = dst.as_chunks_mut::<{ 4 * 32 }>();
        for (s, d) in src.0.iter().zip(dst.0.iter_mut()) {
            let s = s.as_chunks::<6>().0;
            let d = d.as_chunks_mut::<4>().0;
            for (s, d) in s.iter().zip(d.iter_mut()) {
                process_rgb_to_yuyv(s, d);
            }
        }

        // Remaining pixels that did not fill a 32-macropixel chunk.
        let s = src.1.as_chunks::<6>().0;
        let d = dst.1.as_chunks_mut::<4>().0;
        for (s, d) in s.iter().zip(d.iter_mut()) {
            process_rgb_to_yuyv(s, d);
        }

        Ok(())
    }
760
    /// Converts packed RGB to NV16 (4:2:2 semi-planar, limited-range
    /// Bt.709).
    ///
    /// NOTE(review): destination planes are assumed tightly packed
    /// (stride == width) — confirm for padded buffers.
    fn convert_rgb_to_nv16(src: &TensorImage, dst: &mut TensorImage) -> Result<()> {
        assert_eq!(src.fourcc(), RGB);
        assert_eq!(dst.fourcc(), NV16);

        let mut dst_map = dst.tensor().map()?;

        // NV16 layout: full-size Y plane followed by an interleaved UV plane
        // of the same byte count (4:2:2 keeps full vertical chroma).
        let (y_plane, uv_plane) = dst_map.split_at_mut(dst.width() * dst.height());
        let mut bi_planar_image = yuv::YuvBiPlanarImageMut::<u8> {
            y_plane: yuv::BufferStoreMut::Borrowed(y_plane),
            y_stride: dst.width() as u32,
            uv_plane: yuv::BufferStoreMut::Borrowed(uv_plane),
            uv_stride: dst.width() as u32,
            width: dst.width() as u32,
            height: dst.height() as u32,
        };

        Ok(yuv::rgb_to_yuv_nv16(
            &mut bi_planar_image,
            src.tensor.map()?.as_slice(),
            src.row_stride() as u32,
            yuv::YuvRange::Limited,
            yuv::YuvStandardMatrix::Bt709,
            yuv::YuvConversionMode::Balanced,
        )?)
    }
786
787    fn copy_image(src: &TensorImage, dst: &mut TensorImage) -> Result<()> {
788        assert_eq!(src.fourcc(), dst.fourcc());
789        dst.tensor().map()?.copy_from_slice(&src.tensor().map()?);
790        Ok(())
791    }
792
    /// Converts NV16 (4:2:2 semi-planar, limited-range Bt.709) to packed
    /// RGB.
    ///
    /// NOTE(review): the Y and UV strides are taken as `src.width()`, i.e.
    /// the source planes are assumed tightly packed — confirm for padded
    /// rows.
    fn convert_nv16_to_rgb(src: &TensorImage, dst: &mut TensorImage) -> Result<()> {
        assert_eq!(src.fourcc(), NV16);
        assert_eq!(dst.fourcc(), RGB);
        let map = src.tensor.map()?;
        let y_stride = src.width() as u32;
        let uv_stride = src.width() as u32;
        // Y plane occupies the first height * stride bytes; interleaved UV
        // follows.
        let slices = map.as_slice().split_at(y_stride as usize * src.height());

        let src = yuv::YuvBiPlanarImage {
            y_plane: slices.0,
            y_stride,
            uv_plane: slices.1,
            uv_stride,
            width: src.width() as u32,
            height: src.height() as u32,
        };

        Ok(yuv::yuv_nv16_to_rgb(
            &src,
            dst.tensor.map()?.as_mut_slice(),
            dst.row_stride() as u32,
            yuv::YuvRange::Limited,
            yuv::YuvStandardMatrix::Bt709,
            yuv::YuvConversionMode::Balanced,
        )?)
    }
819
    /// Converts NV16 (4:2:2 semi-planar, limited-range Bt.709) to packed
    /// RGBA.
    ///
    /// NOTE(review): as in `convert_nv16_to_rgb`, the source planes are
    /// assumed tightly packed (stride == width) — confirm for padded rows.
    fn convert_nv16_to_rgba(src: &TensorImage, dst: &mut TensorImage) -> Result<()> {
        assert_eq!(src.fourcc(), NV16);
        assert_eq!(dst.fourcc(), RGBA);
        let map = src.tensor.map()?;
        let y_stride = src.width() as u32;
        let uv_stride = src.width() as u32;
        // Y plane occupies the first height * stride bytes; interleaved UV
        // follows.
        let slices = map.as_slice().split_at(y_stride as usize * src.height());

        let src = yuv::YuvBiPlanarImage {
            y_plane: slices.0,
            y_stride,
            uv_plane: slices.1,
            uv_stride,
            width: src.width() as u32,
            height: src.height() as u32,
        };

        Ok(yuv::yuv_nv16_to_rgba(
            &src,
            dst.tensor.map()?.as_mut_slice(),
            dst.row_stride() as u32,
            yuv::YuvRange::Limited,
            yuv::YuvStandardMatrix::Bt709,
            yuv::YuvConversionMode::Balanced,
        )?)
    }
846
847    fn convert_8bps_to_rgb(src: &TensorImage, dst: &mut TensorImage) -> Result<()> {
848        assert_eq!(src.fourcc(), PLANAR_RGB);
849        assert_eq!(dst.fourcc(), RGB);
850
851        let src_map = src.tensor().map()?;
852        let src_ = src_map.as_slice();
853
854        let (src0, src1) = src_.split_at(src.width() * src.height());
855        let (src1, src2) = src1.split_at(src.width() * src.height());
856
857        let mut dst_map = dst.tensor().map()?;
858        let dst_ = dst_map.as_mut_slice();
859
860        src0.par_iter()
861            .zip_eq(src1)
862            .zip_eq(src2)
863            .zip_eq(dst_.as_chunks_mut::<3>().0.par_iter_mut())
864            .for_each(|(((s0, s1), s2), d)| {
865                d[0] = *s0;
866                d[1] = *s1;
867                d[2] = *s2;
868            });
869        Ok(())
870    }
871
872    fn convert_8bps_to_rgba(src: &TensorImage, dst: &mut TensorImage) -> Result<()> {
873        assert_eq!(src.fourcc(), PLANAR_RGB);
874        assert_eq!(dst.fourcc(), RGBA);
875
876        let src_map = src.tensor().map()?;
877        let src_ = src_map.as_slice();
878
879        let (src0, src1) = src_.split_at(src.width() * src.height());
880        let (src1, src2) = src1.split_at(src.width() * src.height());
881
882        let mut dst_map = dst.tensor().map()?;
883        let dst_ = dst_map.as_mut_slice();
884
885        src0.par_iter()
886            .zip_eq(src1)
887            .zip_eq(src2)
888            .zip_eq(dst_.as_chunks_mut::<4>().0.par_iter_mut())
889            .for_each(|(((s0, s1), s2), d)| {
890                d[0] = *s0;
891                d[1] = *s1;
892                d[2] = *s2;
893                d[3] = 255;
894            });
895        Ok(())
896    }
897
898    fn convert_prgba_to_rgb(src: &TensorImage, dst: &mut TensorImage) -> Result<()> {
899        assert_eq!(src.fourcc(), PLANAR_RGBA);
900        assert_eq!(dst.fourcc(), RGB);
901
902        let src_map = src.tensor().map()?;
903        let src_ = src_map.as_slice();
904
905        let (src0, src1) = src_.split_at(src.width() * src.height());
906        let (src1, src2) = src1.split_at(src.width() * src.height());
907        let (src2, _src3) = src2.split_at(src.width() * src.height());
908
909        let mut dst_map = dst.tensor().map()?;
910        let dst_ = dst_map.as_mut_slice();
911
912        src0.par_iter()
913            .zip_eq(src1)
914            .zip_eq(src2)
915            .zip_eq(dst_.as_chunks_mut::<3>().0.par_iter_mut())
916            .for_each(|(((s0, s1), s2), d)| {
917                d[0] = *s0;
918                d[1] = *s1;
919                d[2] = *s2;
920            });
921        Ok(())
922    }
923
924    fn convert_prgba_to_rgba(src: &TensorImage, dst: &mut TensorImage) -> Result<()> {
925        assert_eq!(src.fourcc(), PLANAR_RGBA);
926        assert_eq!(dst.fourcc(), RGBA);
927
928        let src_map = src.tensor().map()?;
929        let src_ = src_map.as_slice();
930
931        let (src0, src1) = src_.split_at(src.width() * src.height());
932        let (src1, src2) = src1.split_at(src.width() * src.height());
933        let (src2, src3) = src2.split_at(src.width() * src.height());
934
935        let mut dst_map = dst.tensor().map()?;
936        let dst_ = dst_map.as_mut_slice();
937
938        src0.par_iter()
939            .zip_eq(src1)
940            .zip_eq(src2)
941            .zip_eq(src3)
942            .zip_eq(dst_.as_chunks_mut::<4>().0.par_iter_mut())
943            .for_each(|((((s0, s1), s2), s3), d)| {
944                d[0] = *s0;
945                d[1] = *s1;
946                d[2] = *s2;
947                d[3] = *s3;
948            });
949        Ok(())
950    }
951
952    pub(crate) fn support_conversion(src: FourCharCode, dst: FourCharCode) -> bool {
953        matches!(
954            (src, dst),
955            (NV12, RGB)
956                | (NV12, RGBA)
957                | (NV12, GREY)
958                | (NV16, RGB)
959                | (NV16, RGBA)
960                | (YUYV, RGB)
961                | (YUYV, RGBA)
962                | (YUYV, GREY)
963                | (YUYV, YUYV)
964                | (YUYV, PLANAR_RGB)
965                | (YUYV, PLANAR_RGBA)
966                | (YUYV, NV16)
967                | (RGBA, RGB)
968                | (RGBA, RGBA)
969                | (RGBA, GREY)
970                | (RGBA, YUYV)
971                | (RGBA, PLANAR_RGB)
972                | (RGBA, PLANAR_RGBA)
973                | (RGBA, NV16)
974                | (RGB, RGB)
975                | (RGB, RGBA)
976                | (RGB, GREY)
977                | (RGB, YUYV)
978                | (RGB, PLANAR_RGB)
979                | (RGB, PLANAR_RGBA)
980                | (RGB, NV16)
981                | (GREY, RGB)
982                | (GREY, RGBA)
983                | (GREY, GREY)
984                | (GREY, YUYV)
985                | (GREY, PLANAR_RGB)
986                | (GREY, PLANAR_RGBA)
987                | (GREY, NV16)
988        )
989    }
990
    /// Converts `src` into `dst`'s pixel format on the CPU.
    ///
    /// Dispatches on the (source, destination) fourcc pair; identical
    /// formats are handled as a plain copy, and unsupported pairs return
    /// [`Error::NotSupported`].
    ///
    /// # Panics
    /// Panics if the two images do not have the same width and height.
    pub(crate) fn convert_format(src: &TensorImage, dst: &mut TensorImage) -> Result<()> {
        // shapes should be equal
        let _timer = FunctionTimer::new(format!(
            "ImageProcessor::convert_format {} to {}",
            src.fourcc().display(),
            dst.fourcc().display()
        ));
        assert_eq!(src.height(), dst.height());
        assert_eq!(src.width(), dst.width());

        match (src.fourcc(), dst.fourcc()) {
            (NV12, RGB) => Self::convert_nv12_to_rgb(src, dst),
            (NV12, RGBA) => Self::convert_nv12_to_rgba(src, dst),
            (NV12, GREY) => Self::convert_nv12_to_grey(src, dst),
            (YUYV, RGB) => Self::convert_yuyv_to_rgb(src, dst),
            (YUYV, RGBA) => Self::convert_yuyv_to_rgba(src, dst),
            (YUYV, GREY) => Self::convert_yuyv_to_grey(src, dst),
            (YUYV, YUYV) => Self::copy_image(src, dst),
            (YUYV, PLANAR_RGB) => Self::convert_yuyv_to_8bps(src, dst),
            (YUYV, PLANAR_RGBA) => Self::convert_yuyv_to_prgba(src, dst),
            (YUYV, NV16) => Self::convert_yuyv_to_nv16(src, dst),
            (RGBA, RGB) => Self::convert_rgba_to_rgb(src, dst),
            (RGBA, RGBA) => Self::copy_image(src, dst),
            (RGBA, GREY) => Self::convert_rgba_to_grey(src, dst),
            (RGBA, YUYV) => Self::convert_rgba_to_yuyv(src, dst),
            (RGBA, PLANAR_RGB) => Self::convert_rgba_to_8bps(src, dst),
            (RGBA, PLANAR_RGBA) => Self::convert_rgba_to_prgba(src, dst),
            (RGBA, NV16) => Self::convert_rgba_to_nv16(src, dst),
            (RGB, RGB) => Self::copy_image(src, dst),
            (RGB, RGBA) => Self::convert_rgb_to_rgba(src, dst),
            (RGB, GREY) => Self::convert_rgb_to_grey(src, dst),
            (RGB, YUYV) => Self::convert_rgb_to_yuyv(src, dst),
            (RGB, PLANAR_RGB) => Self::convert_rgb_to_8bps(src, dst),
            (RGB, PLANAR_RGBA) => Self::convert_rgb_to_prgba(src, dst),
            (RGB, NV16) => Self::convert_rgb_to_nv16(src, dst),
            (GREY, RGB) => Self::convert_grey_to_rgb(src, dst),
            (GREY, RGBA) => Self::convert_grey_to_rgba(src, dst),
            (GREY, GREY) => Self::copy_image(src, dst),
            (GREY, YUYV) => Self::convert_grey_to_yuyv(src, dst),
            (GREY, PLANAR_RGB) => Self::convert_grey_to_8bps(src, dst),
            (GREY, PLANAR_RGBA) => Self::convert_grey_to_prgba(src, dst),
            (GREY, NV16) => Self::convert_grey_to_nv16(src, dst),

            // the following converts are added for use in testing
            // (note: not all of them are advertised by support_conversion)
            (NV16, RGB) => Self::convert_nv16_to_rgb(src, dst),
            (NV16, RGBA) => Self::convert_nv16_to_rgba(src, dst),
            (PLANAR_RGB, RGB) => Self::convert_8bps_to_rgb(src, dst),
            (PLANAR_RGB, RGBA) => Self::convert_8bps_to_rgba(src, dst),
            (PLANAR_RGBA, RGB) => Self::convert_prgba_to_rgb(src, dst),
            (PLANAR_RGBA, RGBA) => Self::convert_prgba_to_rgba(src, dst),
            (s, d) => Err(Error::NotSupported(format!(
                "Conversion from {} to {}",
                s.display(),
                d.display()
            ))),
        }
    }
1048
1049    /// Generic RGB to PLANAR_RGB conversion that works with any TensorImageDst.
1050    fn convert_rgb_to_planar_rgb_generic<D: TensorImageDst>(
1051        src: &TensorImage,
1052        dst: &mut D,
1053    ) -> Result<()> {
1054        assert_eq!(src.fourcc(), RGB);
1055        assert_eq!(dst.fourcc(), PLANAR_RGB);
1056
1057        let src = src.tensor().map()?;
1058        let src = src.as_slice();
1059        let src = src.as_chunks::<3>().0;
1060
1061        let mut dst_map = dst.tensor_mut().map()?;
1062        let dst_ = dst_map.as_mut_slice();
1063
1064        let (dst0, dst1) = dst_.split_at_mut(dst.width() * dst.height());
1065        let (dst1, dst2) = dst1.split_at_mut(dst.width() * dst.height());
1066
1067        src.par_iter()
1068            .zip_eq(dst0)
1069            .zip_eq(dst1)
1070            .zip_eq(dst2)
1071            .for_each(|(((s, d0), d1), d2)| {
1072                *d0 = s[0];
1073                *d1 = s[1];
1074                *d2 = s[2];
1075            });
1076        Ok(())
1077    }
1078
1079    /// Generic RGBA to PLANAR_RGB conversion that works with any
1080    /// TensorImageDst.
1081    fn convert_rgba_to_planar_rgb_generic<D: TensorImageDst>(
1082        src: &TensorImage,
1083        dst: &mut D,
1084    ) -> Result<()> {
1085        assert_eq!(src.fourcc(), RGBA);
1086        assert_eq!(dst.fourcc(), PLANAR_RGB);
1087
1088        let src = src.tensor().map()?;
1089        let src = src.as_slice();
1090        let src = src.as_chunks::<4>().0;
1091
1092        let mut dst_map = dst.tensor_mut().map()?;
1093        let dst_ = dst_map.as_mut_slice();
1094
1095        let (dst0, dst1) = dst_.split_at_mut(dst.width() * dst.height());
1096        let (dst1, dst2) = dst1.split_at_mut(dst.width() * dst.height());
1097
1098        src.par_iter()
1099            .zip_eq(dst0)
1100            .zip_eq(dst1)
1101            .zip_eq(dst2)
1102            .for_each(|(((s, d0), d1), d2)| {
1103                *d0 = s[0];
1104                *d1 = s[1];
1105                *d2 = s[2];
1106            });
1107        Ok(())
1108    }
1109
1110    /// Generic copy for same-format images that works with any TensorImageDst.
1111    fn copy_image_generic<D: TensorImageDst>(src: &TensorImage, dst: &mut D) -> Result<()> {
1112        assert_eq!(src.fourcc(), dst.fourcc());
1113        dst.tensor_mut()
1114            .map()?
1115            .copy_from_slice(&src.tensor().map()?);
1116        Ok(())
1117    }
1118
1119    /// Format conversion that writes to a generic TensorImageDst.
1120    /// Supports common zero-copy preprocessing cases.
1121    pub(crate) fn convert_format_generic<D: TensorImageDst>(
1122        src: &TensorImage,
1123        dst: &mut D,
1124    ) -> Result<()> {
1125        let _timer = FunctionTimer::new(format!(
1126            "ImageProcessor::convert_format_generic {} to {}",
1127            src.fourcc().display(),
1128            dst.fourcc().display()
1129        ));
1130        assert_eq!(src.height(), dst.height());
1131        assert_eq!(src.width(), dst.width());
1132
1133        match (src.fourcc(), dst.fourcc()) {
1134            (RGB, PLANAR_RGB) => Self::convert_rgb_to_planar_rgb_generic(src, dst),
1135            (RGBA, PLANAR_RGB) => Self::convert_rgba_to_planar_rgb_generic(src, dst),
1136            (f1, f2) if f1 == f2 => Self::copy_image_generic(src, dst),
1137            (s, d) => Err(Error::NotSupported(format!(
1138                "Generic conversion from {} to {} not supported",
1139                s.display(),
1140                d.display()
1141            ))),
1142        }
1143    }
1144
    /// Resizes `src` into `dst` with optional crop, rotation, and flip.
    ///
    /// The src and dest images should be in RGB/RGBA/grey format for correct
    /// output. If the image does not have 1, 3, or 4 channels, an error will
    /// be returned. The src and dest images must have the same fourcc,
    /// otherwise the function will panic.
    fn resize_flip_rotate(
        &mut self,
        src: &TensorImage,
        dst: &mut TensorImage,
        rotation: Rotation,
        flip: Flip,
        crop: Crop,
    ) -> Result<()> {
        let _timer = FunctionTimer::new(format!(
            "ImageProcessor::resize_flip_rotate {}x{} to {}x{} {}",
            src.width(),
            src.height(),
            dst.width(),
            dst.height(),
            dst.fourcc().display()
        ));
        assert_eq!(src.fourcc(), dst.fourcc());

        // Map the channel count onto a fast_image_resize pixel type.
        let src_type = match src.channels() {
            1 => fast_image_resize::PixelType::U8,
            3 => fast_image_resize::PixelType::U8x3,
            4 => fast_image_resize::PixelType::U8x4,
            _ => {
                return Err(Error::NotImplemented(
                    "Unsupported source image format".to_string(),
                ));
            }
        };

        let mut src_map = src.tensor().map()?;

        let mut dst_map = dst.tensor().map()?;

        // Restrict the resize to the source crop rectangle, if one is given.
        let options = if let Some(crop) = crop.src_rect {
            self.options.crop(
                crop.left as f64,
                crop.top as f64,
                crop.width as f64,
                crop.height as f64,
            )
        } else {
            self.options
        };

        // The destination rectangle defaults to the whole destination image.
        let mut dst_rect = crop.dst_rect.unwrap_or_else(|| Rect {
            left: 0,
            top: 0,
            width: dst.width(),
            height: dst.height(),
        });

        // adjust crop box for rotation/flip
        Self::adjust_dest_rect_for_rotate_flip(&mut dst_rect, dst, rotation, flip);

        // A real resize is needed unless dimensions match and any crop
        // rectangles cover the full source/destination.
        let needs_resize = src.width() != dst.width()
            || src.height() != dst.height()
            || crop.src_rect.is_some_and(|crop| {
                crop != Rect {
                    left: 0,
                    top: 0,
                    width: src.width(),
                    height: src.height(),
                }
            })
            || crop.dst_rect.is_some_and(|crop| {
                crop != Rect {
                    left: 0,
                    top: 0,
                    width: dst.width(),
                    height: dst.height(),
                }
            });

        if needs_resize {
            let src_view = fast_image_resize::images::Image::from_slice_u8(
                src.width() as u32,
                src.height() as u32,
                &mut src_map,
                src_type,
            )?;

            match (rotation, flip) {
                // No rotate/flip: resize straight into the destination crop.
                (Rotation::None, Flip::None) => {
                    let mut dst_view = fast_image_resize::images::Image::from_slice_u8(
                        dst.width() as u32,
                        dst.height() as u32,
                        &mut dst_map,
                        src_type,
                    )?;

                    let mut dst_view = fast_image_resize::images::CroppedImageMut::new(
                        &mut dst_view,
                        dst_rect.left as u32,
                        dst_rect.top as u32,
                        dst_rect.width as u32,
                        dst_rect.height as u32,
                    )?;

                    self.resizer.resize(&src_view, &mut dst_view, &options)?;
                }
                // 90-degree rotations: resize into a temporary buffer with
                // swapped width/height, then rotate/flip it into `dst`.
                (Rotation::Clockwise90, _) | (Rotation::CounterClockwise90, _) => {
                    let mut tmp = vec![0; dst.row_stride() * dst.height()];
                    let mut tmp_view = fast_image_resize::images::Image::from_slice_u8(
                        dst.height() as u32,
                        dst.width() as u32,
                        &mut tmp,
                        src_type,
                    )?;

                    let mut tmp_view = fast_image_resize::images::CroppedImageMut::new(
                        &mut tmp_view,
                        dst_rect.left as u32,
                        dst_rect.top as u32,
                        dst_rect.width as u32,
                        dst_rect.height as u32,
                    )?;

                    self.resizer.resize(&src_view, &mut tmp_view, &options)?;
                    Self::flip_rotate_ndarray(&tmp, &mut dst_map, dst, rotation, flip)?;
                }
                // Flip only, or 180-degree rotation: same-size temporary,
                // then flip/rotate into `dst`.
                (Rotation::None, _) | (Rotation::Rotate180, _) => {
                    let mut tmp = vec![0; dst.row_stride() * dst.height()];
                    let mut tmp_view = fast_image_resize::images::Image::from_slice_u8(
                        dst.width() as u32,
                        dst.height() as u32,
                        &mut tmp,
                        src_type,
                    )?;

                    let mut tmp_view = fast_image_resize::images::CroppedImageMut::new(
                        &mut tmp_view,
                        dst_rect.left as u32,
                        dst_rect.top as u32,
                        dst_rect.width as u32,
                        dst_rect.height as u32,
                    )?;

                    self.resizer.resize(&src_view, &mut tmp_view, &options)?;
                    Self::flip_rotate_ndarray(&tmp, &mut dst_map, dst, rotation, flip)?;
                }
            }
        } else {
            // No resize required: rotate/flip (or plain copy) directly.
            Self::flip_rotate_ndarray(&src_map, &mut dst_map, dst, rotation, flip)?;
        }
        Ok(())
    }
1295
1296    fn adjust_dest_rect_for_rotate_flip(
1297        crop: &mut Rect,
1298        dst: &TensorImage,
1299        rot: Rotation,
1300        flip: Flip,
1301    ) {
1302        match rot {
1303            Rotation::None => {}
1304            Rotation::Clockwise90 => {
1305                *crop = Rect {
1306                    left: crop.top,
1307                    top: dst.width() - crop.left - crop.width,
1308                    width: crop.height,
1309                    height: crop.width,
1310                }
1311            }
1312            Rotation::Rotate180 => {
1313                *crop = Rect {
1314                    left: dst.width() - crop.left - crop.width,
1315                    top: dst.height() - crop.top - crop.height,
1316                    width: crop.width,
1317                    height: crop.height,
1318                }
1319            }
1320            Rotation::CounterClockwise90 => {
1321                *crop = Rect {
1322                    left: dst.height() - crop.top - crop.height,
1323                    top: crop.left,
1324                    width: crop.height,
1325                    height: crop.width,
1326                }
1327            }
1328        }
1329
1330        match flip {
1331            Flip::None => {}
1332            Flip::Vertical => crop.top = dst.height() - crop.top - crop.height,
1333            Flip::Horizontal => crop.left = dst.width() - crop.left - crop.width,
1334        }
1335    }
1336
1337    pub(crate) fn fill_image_outside_crop(
1338        dst: &mut TensorImage,
1339        rgba: [u8; 4],
1340        crop: Rect,
1341    ) -> Result<()> {
1342        let dst_fourcc = dst.fourcc();
1343        let mut dst_map = dst.tensor().map()?;
1344        let dst = (dst_map.as_mut_slice(), dst.width(), dst.height());
1345        match dst_fourcc {
1346            RGBA => Self::fill_image_outside_crop_(dst, rgba, crop),
1347            RGB => Self::fill_image_outside_crop_(dst, Self::rgba_to_rgb(rgba), crop),
1348            GREY => Self::fill_image_outside_crop_(dst, Self::rgba_to_grey(rgba), crop),
1349            YUYV => Self::fill_image_outside_crop_(
1350                (dst.0, dst.1 / 2, dst.2),
1351                Self::rgba_to_yuyv(rgba),
1352                Rect::new(crop.left / 2, crop.top, crop.width.div_ceil(2), crop.height),
1353            ),
1354            PLANAR_RGB => Self::fill_image_outside_crop_planar(dst, Self::rgba_to_rgb(rgba), crop),
1355            PLANAR_RGBA => Self::fill_image_outside_crop_planar(dst, rgba, crop),
1356            NV16 => {
1357                let yuyv = Self::rgba_to_yuyv(rgba);
1358                Self::fill_image_outside_crop_yuv_semiplanar(dst, yuyv[0], [yuyv[1], yuyv[3]], crop)
1359            }
1360            _ => Err(Error::Internal(format!(
1361                "Found unexpected destination {}",
1362                dst_fourcc.display()
1363            ))),
1364        }
1365    }
1366
1367    /// Generic fill for TensorImageDst types.
1368    pub(crate) fn fill_image_outside_crop_generic<D: TensorImageDst>(
1369        dst: &mut D,
1370        rgba: [u8; 4],
1371        crop: Rect,
1372    ) -> Result<()> {
1373        let dst_fourcc = dst.fourcc();
1374        let dst_width = dst.width();
1375        let dst_height = dst.height();
1376        let mut dst_map = dst.tensor_mut().map()?;
1377        let dst = (dst_map.as_mut_slice(), dst_width, dst_height);
1378        match dst_fourcc {
1379            RGBA => Self::fill_image_outside_crop_(dst, rgba, crop),
1380            RGB => Self::fill_image_outside_crop_(dst, Self::rgba_to_rgb(rgba), crop),
1381            GREY => Self::fill_image_outside_crop_(dst, Self::rgba_to_grey(rgba), crop),
1382            YUYV => Self::fill_image_outside_crop_(
1383                (dst.0, dst.1 / 2, dst.2),
1384                Self::rgba_to_yuyv(rgba),
1385                Rect::new(crop.left / 2, crop.top, crop.width.div_ceil(2), crop.height),
1386            ),
1387            PLANAR_RGB => Self::fill_image_outside_crop_planar(dst, Self::rgba_to_rgb(rgba), crop),
1388            PLANAR_RGBA => Self::fill_image_outside_crop_planar(dst, rgba, crop),
1389            NV16 => {
1390                let yuyv = Self::rgba_to_yuyv(rgba);
1391                Self::fill_image_outside_crop_yuv_semiplanar(dst, yuyv[0], [yuyv[1], yuyv[3]], crop)
1392            }
1393            _ => Err(Error::Internal(format!(
1394                "Found unexpected destination {}",
1395                dst_fourcc.display()
1396            ))),
1397        }
1398    }
1399
1400    fn fill_image_outside_crop_<const N: usize>(
1401        (dst, dst_width, _dst_height): (&mut [u8], usize, usize),
1402        pix: [u8; N],
1403        crop: Rect,
1404    ) -> Result<()> {
1405        use rayon::{
1406            iter::{IntoParallelRefMutIterator, ParallelIterator},
1407            prelude::ParallelSliceMut,
1408        };
1409
1410        let s = dst.as_chunks_mut::<N>().0;
1411        // calculate the top/bottom
1412        let top_offset = (0, (crop.top * dst_width + crop.left));
1413        let bottom_offset = (
1414            ((crop.top + crop.height) * dst_width + crop.left).min(s.len()),
1415            s.len(),
1416        );
1417
1418        s[top_offset.0..top_offset.1]
1419            .par_iter_mut()
1420            .for_each(|x| *x = pix);
1421
1422        s[bottom_offset.0..bottom_offset.1]
1423            .par_iter_mut()
1424            .for_each(|x| *x = pix);
1425
1426        if dst_width == crop.width {
1427            return Ok(());
1428        }
1429
1430        // the middle part has a stride as well
1431        let middle_stride = dst_width - crop.width;
1432        let middle_offset = (
1433            (crop.top * dst_width + crop.left + crop.width),
1434            ((crop.top + crop.height) * dst_width + crop.left + crop.width).min(s.len()),
1435        );
1436
1437        s[middle_offset.0..middle_offset.1]
1438            .par_chunks_exact_mut(dst_width)
1439            .for_each(|row| {
1440                for p in &mut row[0..middle_stride] {
1441                    *p = pix;
1442                }
1443            });
1444
1445        Ok(())
1446    }
1447
1448    fn fill_image_outside_crop_planar<const N: usize>(
1449        (dst, dst_width, dst_height): (&mut [u8], usize, usize),
1450        pix: [u8; N],
1451        crop: Rect,
1452    ) -> Result<()> {
1453        use rayon::{
1454            iter::{IntoParallelRefMutIterator, ParallelIterator},
1455            prelude::ParallelSliceMut,
1456        };
1457
1458        // map.as_mut_slice().splitn_mut(n, pred)
1459        let s_rem = dst;
1460
1461        s_rem
1462            .par_chunks_exact_mut(dst_height * dst_width)
1463            .zip(pix)
1464            .for_each(|(s, p)| {
1465                let top_offset = (0, (crop.top * dst_width + crop.left));
1466                let bottom_offset = (
1467                    ((crop.top + crop.height) * dst_width + crop.left).min(s.len()),
1468                    s.len(),
1469                );
1470
1471                s[top_offset.0..top_offset.1]
1472                    .par_iter_mut()
1473                    .for_each(|x| *x = p);
1474
1475                s[bottom_offset.0..bottom_offset.1]
1476                    .par_iter_mut()
1477                    .for_each(|x| *x = p);
1478
1479                if dst_width == crop.width {
1480                    return;
1481                }
1482
1483                // the middle part has a stride as well
1484                let middle_stride = dst_width - crop.width;
1485                let middle_offset = (
1486                    (crop.top * dst_width + crop.left + crop.width),
1487                    ((crop.top + crop.height) * dst_width + crop.left + crop.width).min(s.len()),
1488                );
1489
1490                s[middle_offset.0..middle_offset.1]
1491                    .par_chunks_exact_mut(dst_width)
1492                    .for_each(|row| {
1493                        for x in &mut row[0..middle_stride] {
1494                            *x = p;
1495                        }
1496                    });
1497            });
1498        Ok(())
1499    }
1500
1501    fn fill_image_outside_crop_yuv_semiplanar(
1502        (dst, dst_width, dst_height): (&mut [u8], usize, usize),
1503        y: u8,
1504        uv: [u8; 2],
1505        mut crop: Rect,
1506    ) -> Result<()> {
1507        let (y_plane, uv_plane) = dst.split_at_mut(dst_width * dst_height);
1508        Self::fill_image_outside_crop_::<1>((y_plane, dst_width, dst_height), [y], crop)?;
1509        crop.left /= 2;
1510        crop.width /= 2;
1511        Self::fill_image_outside_crop_::<2>((uv_plane, dst_width / 2, dst_height), uv, crop)?;
1512        Ok(())
1513    }
1514
1515    fn rgba_to_rgb(rgba: [u8; 4]) -> [u8; 3] {
1516        let [r, g, b, _] = rgba;
1517        [r, g, b]
1518    }
1519
1520    fn rgba_to_grey(rgba: [u8; 4]) -> [u8; 1] {
1521        const BIAS: i32 = 20;
1522        const KR: f64 = 0.2126f64;
1523        const KB: f64 = 0.0722f64;
1524        const KG: f64 = 1.0 - KR - KB;
1525        const Y_R: i32 = (KR * (255 << BIAS) as f64 / 255.0).round() as i32;
1526        const Y_G: i32 = (KG * (255 << BIAS) as f64 / 255.0).round() as i32;
1527        const Y_B: i32 = (KB * (255 << BIAS) as f64 / 255.0).round() as i32;
1528
1529        const ROUND: i32 = 1 << (BIAS - 1);
1530
1531        let [r, g, b, _] = rgba;
1532        let y = ((Y_R * r as i32 + Y_G * g as i32 + Y_B * b as i32 + ROUND) >> BIAS) as u8;
1533        [y]
1534    }
1535
1536    fn rgba_to_yuyv(rgba: [u8; 4]) -> [u8; 4] {
1537        const KR: f64 = 0.2126f64;
1538        const KB: f64 = 0.0722f64;
1539        const KG: f64 = 1.0 - KR - KB;
1540        const BIAS: i32 = 20;
1541
1542        const Y_R: i32 = (KR * (219 << BIAS) as f64 / 255.0).round() as i32;
1543        const Y_G: i32 = (KG * (219 << BIAS) as f64 / 255.0).round() as i32;
1544        const Y_B: i32 = (KB * (219 << BIAS) as f64 / 255.0).round() as i32;
1545
1546        const U_R: i32 = (-KR / (KR + KG) / 2.0 * (224 << BIAS) as f64 / 255.0).round() as i32;
1547        const U_G: i32 = (-KG / (KR + KG) / 2.0 * (224 << BIAS) as f64 / 255.0).round() as i32;
1548        const U_B: i32 = (0.5_f64 * (224 << BIAS) as f64 / 255.0).ceil() as i32;
1549
1550        const V_R: i32 = (0.5_f64 * (224 << BIAS) as f64 / 255.0).ceil() as i32;
1551        const V_G: i32 = (-KG / (KG + KB) / 2.0 * (224 << BIAS) as f64 / 255.0).round() as i32;
1552        const V_B: i32 = (-KB / (KG + KB) / 2.0 * (224 << BIAS) as f64 / 255.0).round() as i32;
1553        const ROUND: i32 = 1 << (BIAS - 1);
1554
1555        let [r, g, b, _] = rgba;
1556        let r = r as i32;
1557        let g = g as i32;
1558        let b = b as i32;
1559        let y = (((Y_R * r + Y_G * g + Y_B * b + ROUND) >> BIAS) + 16) as u8;
1560        let u = (((U_R * r + U_G * g + U_B * b + ROUND) >> BIAS) + 128) as u8;
1561        let v = (((V_R * r + V_G * g + V_B * b + ROUND) >> BIAS) + 128) as u8;
1562
1563        [y, u, y, v]
1564    }
1565
    /// Alpha-blends a ModelPack segmentation map onto the packed pixels in
    /// `dst_slice` (the mapped bytes of `dst`).
    ///
    /// `segmentation.segmentation` is a height x width x classes score
    /// tensor covering the normalized window [xmin, xmax] x [ymin, ymax] of
    /// the destination. Each destination pixel in that window takes the
    /// colour of its nearest mask cell's argmax class, blended with the
    /// existing pixel using the colour's fourth byte as alpha. The highest
    /// class index is skipped (presumably background — confirm with the
    /// decoder).
    #[cfg(feature = "decoder")]
    fn render_modelpack_segmentation(
        &mut self,
        dst: &TensorImage,
        dst_slice: &mut [u8],
        segmentation: &Segmentation,
    ) -> Result<()> {
        use ndarray_stats::QuantileExt;

        let seg = &segmentation.segmentation;
        let [seg_height, seg_width, seg_classes] = *seg.shape() else {
            unreachable!("Array3 did not have [usize; 3] as shape");
        };
        // Window of the destination covered by the mask, in pixels.
        let start_y = (dst.height() as f32 * segmentation.ymin).round();
        let end_y = (dst.height() as f32 * segmentation.ymax).round();
        let start_x = (dst.width() as f32 * segmentation.xmin).round();
        let end_x = (dst.width() as f32 * segmentation.xmax).round();

        // Scale factors mapping destination pixels back into mask cells
        // (endpoints map onto endpoints).
        let scale_x = (seg_width as f32 - 1.0) / ((end_x - start_x) - 1.0);
        let scale_y = (seg_height as f32 - 1.0) / ((end_y - start_y) - 1.0);

        let start_x_u = (start_x as usize).min(dst.width());
        let start_y_u = (start_y as usize).min(dst.height());
        let end_x_u = (end_x as usize).min(dst.width());
        let end_y_u = (end_y as usize).min(dst.height());

        // Collapse the class axis up front: one argmax label per mask cell.
        let argmax = seg.map_axis(Axis(2), |r| r.argmax().unwrap());
        // Nearest-neighbour lookup, clamped to the mask bounds.
        let get_value_at_nearest = |x: f32, y: f32| -> usize {
            let x = x.round() as usize;
            let y = y.round() as usize;
            argmax
                .get([y.min(seg_height - 1), x.min(seg_width - 1)])
                .copied()
                .unwrap_or(0)
        };

        for y in start_y_u..end_y_u {
            for x in start_x_u..end_x_u {
                let seg_x = (x as f32 - start_x) * scale_x;
                let seg_y = (y as f32 - start_y) * scale_y;
                let label = get_value_at_nearest(seg_x, seg_y);

                // The last class index is left un-tinted.
                if label == seg_classes - 1 {
                    continue;
                }

                let color = self.colors[label % self.colors.len()];

                let alpha = color[3] as u16;

                // NOTE(review): assumes dst has at least 3 interleaved
                // colour channels (RGB/RGBA) — confirm callers.
                let dst_index = (y * dst.row_stride()) + (x * dst.channels());
                for c in 0..3 {
                    dst_slice[dst_index + c] = ((color[c] as u16 * alpha
                        + dst_slice[dst_index + c] as u16 * (255 - alpha))
                        / 255) as u8;
                }
            }
        }

        Ok(())
    }
1627
    /// Alpha-blends a single-channel (YOLO-style) segmentation mask onto the
    /// packed pixels in `dst_slice`, tinting with the colour for `class`.
    ///
    /// The mask is a height x width x 1 tensor of u8 scores covering the
    /// normalized window [xmin, xmax] x [ymin, ymax] of the destination;
    /// pixels whose nearest mask sample is >= 127 are blended with the class
    /// colour, using the colour's fourth byte as alpha.
    #[cfg(feature = "decoder")]
    fn render_yolo_segmentation(
        &mut self,
        dst: &TensorImage,
        dst_slice: &mut [u8],
        segmentation: &Segmentation,
        class: usize,
    ) -> Result<()> {
        let seg = &segmentation.segmentation;
        let [seg_height, seg_width, classes] = *seg.shape() else {
            unreachable!("Array3 did not have [usize;3] as shape");
        };
        // YOLO masks are expected to carry a single channel.
        debug_assert_eq!(classes, 1);

        // Window of the destination covered by the mask, in pixels.
        let start_y = (dst.height() as f32 * segmentation.ymin).round();
        let end_y = (dst.height() as f32 * segmentation.ymax).round();
        let start_x = (dst.width() as f32 * segmentation.xmin).round();
        let end_x = (dst.width() as f32 * segmentation.xmax).round();

        // Scale factors mapping destination pixels back into mask cells.
        let scale_x = (seg_width as f32 - 1.0) / ((end_x - start_x) - 1.0);
        let scale_y = (seg_height as f32 - 1.0) / ((end_y - start_y) - 1.0);

        let start_x_u = (start_x as usize).min(dst.width());
        let start_y_u = (start_y as usize).min(dst.height());
        let end_x_u = (end_x as usize).min(dst.width());
        let end_y_u = (end_y as usize).min(dst.height());

        for y in start_y_u..end_y_u {
            for x in start_x_u..end_x_u {
                // Nearest mask sample (truncating); out-of-range reads as 0.
                let seg_x = ((x as f32 - start_x) * scale_x) as usize;
                let seg_y = ((y as f32 - start_y) * scale_y) as usize;
                let val = *seg.get([seg_y, seg_x, 0]).unwrap_or(&0);

                // Below-threshold samples leave the pixel untouched.
                if val < 127 {
                    continue;
                }

                let color = self.colors[class % self.colors.len()];

                let alpha = color[3] as u16;

                // NOTE(review): assumes dst has at least 3 interleaved
                // colour channels (RGB/RGBA) — confirm callers.
                let dst_index = (y * dst.row_stride()) + (x * dst.channels());
                for c in 0..3 {
                    dst_slice[dst_index + c] = ((color[c] as u16 * alpha
                        + dst_slice[dst_index + c] as u16 * (255 - alpha))
                        / 255) as u8;
                }
            }
        }

        Ok(())
    }
1680
1681    #[cfg(feature = "decoder")]
1682    fn render_box(
1683        &mut self,
1684        dst: &TensorImage,
1685        dst_slice: &mut [u8],
1686        detect: &[DetectBox],
1687    ) -> Result<()> {
1688        const LINE_THICKNESS: usize = 3;
1689        for d in detect {
1690            use edgefirst_decoder::BoundingBox;
1691
1692            let label = d.label;
1693            let [r, g, b, _] = self.colors[label % self.colors.len()];
1694            let bbox = d.bbox.to_canonical();
1695            let bbox = BoundingBox {
1696                xmin: bbox.xmin.clamp(0.0, 1.0),
1697                ymin: bbox.ymin.clamp(0.0, 1.0),
1698                xmax: bbox.xmax.clamp(0.0, 1.0),
1699                ymax: bbox.ymax.clamp(0.0, 1.0),
1700            };
1701            let inner = [
1702                ((dst.width() - 1) as f32 * bbox.xmin - 0.5).round() as usize,
1703                ((dst.height() - 1) as f32 * bbox.ymin - 0.5).round() as usize,
1704                ((dst.width() - 1) as f32 * bbox.xmax + 0.5).round() as usize,
1705                ((dst.height() - 1) as f32 * bbox.ymax + 0.5).round() as usize,
1706            ];
1707
1708            let outer = [
1709                inner[0].saturating_sub(LINE_THICKNESS),
1710                inner[1].saturating_sub(LINE_THICKNESS),
1711                (inner[2] + LINE_THICKNESS).min(dst.width()),
1712                (inner[3] + LINE_THICKNESS).min(dst.height()),
1713            ];
1714
1715            // top line
1716            for y in outer[1] + 1..=inner[1] {
1717                for x in outer[0] + 1..outer[2] {
1718                    let index = (y * dst.row_stride()) + (x * dst.channels());
1719                    dst_slice[index..(index + 3)].copy_from_slice(&[r, g, b]);
1720                }
1721            }
1722
1723            // left and right lines
1724            for y in inner[1]..inner[3] {
1725                for x in outer[0] + 1..=inner[0] {
1726                    let index = (y * dst.row_stride()) + (x * dst.channels());
1727                    dst_slice[index..(index + 3)].copy_from_slice(&[r, g, b]);
1728                }
1729
1730                for x in inner[2]..outer[2] {
1731                    let index = (y * dst.row_stride()) + (x * dst.channels());
1732                    dst_slice[index..(index + 3)].copy_from_slice(&[r, g, b]);
1733                }
1734            }
1735
1736            // bottom line
1737            for y in inner[3]..outer[3] {
1738                for x in outer[0] + 1..outer[2] {
1739                    let index = (y * dst.row_stride()) + (x * dst.channels());
1740                    dst_slice[index..(index + 3)].copy_from_slice(&[r, g, b]);
1741                }
1742            }
1743        }
1744        Ok(())
1745    }
1746}
1747
1748impl ImageProcessorTrait for CPUProcessor {
1749    fn convert(
1750        &mut self,
1751        src: &TensorImage,
1752        dst: &mut TensorImage,
1753        rotation: Rotation,
1754        flip: Flip,
1755        crop: Crop,
1756    ) -> Result<()> {
1757        crop.check_crop(src, dst)?;
1758        // supported destinations and srcs:
1759        let intermediate = match (src.fourcc(), dst.fourcc()) {
1760            (NV12, RGB) => RGB,
1761            (NV12, RGBA) => RGBA,
1762            (NV12, GREY) => GREY,
1763            (NV12, YUYV) => RGBA, // RGBA intermediary for YUYV dest resize/convert/rotation/flip
1764            (NV12, NV16) => RGBA, // RGBA intermediary for YUYV dest resize/convert/rotation/flip
1765            (NV12, PLANAR_RGB) => RGB,
1766            (NV12, PLANAR_RGBA) => RGBA,
1767            (YUYV, RGB) => RGB,
1768            (YUYV, RGBA) => RGBA,
1769            (YUYV, GREY) => GREY,
1770            (YUYV, YUYV) => RGBA, // RGBA intermediary for YUYV dest resize/convert/rotation/flip
1771            (YUYV, PLANAR_RGB) => RGB,
1772            (YUYV, PLANAR_RGBA) => RGBA,
1773            (YUYV, NV16) => RGBA,
1774            (RGBA, RGB) => RGBA,
1775            (RGBA, RGBA) => RGBA,
1776            (RGBA, GREY) => GREY,
1777            (RGBA, YUYV) => RGBA, // RGBA intermediary for YUYV dest resize/convert/rotation/flip
1778            (RGBA, PLANAR_RGB) => RGBA,
1779            (RGBA, PLANAR_RGBA) => RGBA,
1780            (RGBA, NV16) => RGBA,
1781            (RGB, RGB) => RGB,
1782            (RGB, RGBA) => RGB,
1783            (RGB, GREY) => GREY,
1784            (RGB, YUYV) => RGB, // RGB intermediary for YUYV dest resize/convert/rotation/flip
1785            (RGB, PLANAR_RGB) => RGB,
1786            (RGB, PLANAR_RGBA) => RGB,
1787            (RGB, NV16) => RGB,
1788            (GREY, RGB) => RGB,
1789            (GREY, RGBA) => RGBA,
1790            (GREY, GREY) => GREY,
1791            (GREY, YUYV) => GREY,
1792            (GREY, PLANAR_RGB) => GREY,
1793            (GREY, PLANAR_RGBA) => GREY,
1794            (GREY, NV16) => GREY,
1795            (s, d) => {
1796                return Err(Error::NotSupported(format!(
1797                    "Conversion from {} to {}",
1798                    s.display(),
1799                    d.display()
1800                )));
1801            }
1802        };
1803
1804        // let crop = crop.src_rect;
1805
1806        let need_resize_flip_rotation = rotation != Rotation::None
1807            || flip != Flip::None
1808            || src.width() != dst.width()
1809            || src.height() != dst.height()
1810            || crop.src_rect.is_some_and(|crop| {
1811                crop != Rect {
1812                    left: 0,
1813                    top: 0,
1814                    width: src.width(),
1815                    height: src.height(),
1816                }
1817            })
1818            || crop.dst_rect.is_some_and(|crop| {
1819                crop != Rect {
1820                    left: 0,
1821                    top: 0,
1822                    width: dst.width(),
1823                    height: dst.height(),
1824                }
1825            });
1826
1827        // check if a direct conversion can be done
1828        if !need_resize_flip_rotation && Self::support_conversion(src.fourcc(), dst.fourcc()) {
1829            return Self::convert_format(src, dst);
1830        };
1831
1832        // any extra checks
1833        if dst.fourcc() == YUYV && !dst.width().is_multiple_of(2) {
1834            return Err(Error::NotSupported(format!(
1835                "{} destination must have width divisible by 2",
1836                dst.fourcc().display(),
1837            )));
1838        }
1839
1840        // create tmp buffer
1841        let mut tmp_buffer;
1842        let tmp;
1843        if intermediate != src.fourcc() {
1844            tmp_buffer = TensorImage::new(
1845                src.width(),
1846                src.height(),
1847                intermediate,
1848                Some(edgefirst_tensor::TensorMemory::Mem),
1849            )?;
1850
1851            Self::convert_format(src, &mut tmp_buffer)?;
1852            tmp = &tmp_buffer;
1853        } else {
1854            tmp = src;
1855        }
1856
1857        // format must be RGB/RGBA/GREY
1858        matches!(tmp.fourcc(), RGB | RGBA | GREY);
1859        if tmp.fourcc() == dst.fourcc() {
1860            self.resize_flip_rotate(tmp, dst, rotation, flip, crop)?;
1861        } else if !need_resize_flip_rotation {
1862            Self::convert_format(tmp, dst)?;
1863        } else {
1864            let mut tmp2 = TensorImage::new(
1865                dst.width(),
1866                dst.height(),
1867                tmp.fourcc(),
1868                Some(edgefirst_tensor::TensorMemory::Mem),
1869            )?;
1870            if crop.dst_rect.is_some_and(|crop| {
1871                crop != Rect {
1872                    left: 0,
1873                    top: 0,
1874                    width: dst.width(),
1875                    height: dst.height(),
1876                }
1877            }) && crop.dst_color.is_none()
1878            {
1879                // convert the dst into tmp2 when there is a dst crop
1880                // TODO: this could be optimized by changing convert_format to take a
1881                // destination crop?
1882
1883                Self::convert_format(dst, &mut tmp2)?;
1884            }
1885            self.resize_flip_rotate(tmp, &mut tmp2, rotation, flip, crop)?;
1886            Self::convert_format(&tmp2, dst)?;
1887        }
1888        if let (Some(dst_rect), Some(dst_color)) = (crop.dst_rect, crop.dst_color) {
1889            let full_rect = Rect {
1890                left: 0,
1891                top: 0,
1892                width: dst.width(),
1893                height: dst.height(),
1894            };
1895            if dst_rect != full_rect {
1896                Self::fill_image_outside_crop(dst, dst_color, dst_rect)?;
1897            }
1898        }
1899
1900        Ok(())
1901    }
1902
1903    fn convert_ref(
1904        &mut self,
1905        src: &TensorImage,
1906        dst: &mut TensorImageRef<'_>,
1907        rotation: Rotation,
1908        flip: Flip,
1909        crop: Crop,
1910    ) -> Result<()> {
1911        crop.check_crop_ref(src, dst)?;
1912
1913        // Determine intermediate format needed for conversion
1914        let intermediate = match (src.fourcc(), dst.fourcc()) {
1915            (NV12, RGB) => RGB,
1916            (NV12, RGBA) => RGBA,
1917            (NV12, GREY) => GREY,
1918            (NV12, PLANAR_RGB) => RGB,
1919            (NV12, PLANAR_RGBA) => RGBA,
1920            (YUYV, RGB) => RGB,
1921            (YUYV, RGBA) => RGBA,
1922            (YUYV, GREY) => GREY,
1923            (YUYV, PLANAR_RGB) => RGB,
1924            (YUYV, PLANAR_RGBA) => RGBA,
1925            (RGBA, RGB) => RGBA,
1926            (RGBA, RGBA) => RGBA,
1927            (RGBA, GREY) => GREY,
1928            (RGBA, PLANAR_RGB) => RGBA,
1929            (RGBA, PLANAR_RGBA) => RGBA,
1930            (RGB, RGB) => RGB,
1931            (RGB, RGBA) => RGB,
1932            (RGB, GREY) => GREY,
1933            (RGB, PLANAR_RGB) => RGB,
1934            (RGB, PLANAR_RGBA) => RGB,
1935            (GREY, RGB) => RGB,
1936            (GREY, RGBA) => RGBA,
1937            (GREY, GREY) => GREY,
1938            (GREY, PLANAR_RGB) => GREY,
1939            (GREY, PLANAR_RGBA) => GREY,
1940            (s, d) => {
1941                return Err(Error::NotSupported(format!(
1942                    "Conversion from {} to {}",
1943                    s.display(),
1944                    d.display()
1945                )));
1946            }
1947        };
1948
1949        let need_resize_flip_rotation = rotation != Rotation::None
1950            || flip != Flip::None
1951            || src.width() != dst.width()
1952            || src.height() != dst.height()
1953            || crop.src_rect.is_some_and(|crop| {
1954                crop != Rect {
1955                    left: 0,
1956                    top: 0,
1957                    width: src.width(),
1958                    height: src.height(),
1959                }
1960            })
1961            || crop.dst_rect.is_some_and(|crop| {
1962                crop != Rect {
1963                    left: 0,
1964                    top: 0,
1965                    width: dst.width(),
1966                    height: dst.height(),
1967                }
1968            });
1969
1970        // Simple case: no resize/flip/rotation needed
1971        if !need_resize_flip_rotation {
1972            // Try direct generic conversion (zero-copy path)
1973            if let Ok(()) = Self::convert_format_generic(src, dst) {
1974                return Ok(());
1975            }
1976        }
1977
1978        // Complex case: need intermediate buffers
1979        // First, convert source to intermediate format if needed
1980        let mut tmp_buffer;
1981        let tmp: &TensorImage;
1982        if intermediate != src.fourcc() {
1983            tmp_buffer = TensorImage::new(
1984                src.width(),
1985                src.height(),
1986                intermediate,
1987                Some(edgefirst_tensor::TensorMemory::Mem),
1988            )?;
1989            Self::convert_format(src, &mut tmp_buffer)?;
1990            tmp = &tmp_buffer;
1991        } else {
1992            tmp = src;
1993        }
1994
1995        // Process resize/flip/rotation if needed
1996        if need_resize_flip_rotation {
1997            // Create intermediate buffer for resize output
1998            let mut tmp2 = TensorImage::new(
1999                dst.width(),
2000                dst.height(),
2001                tmp.fourcc(),
2002                Some(edgefirst_tensor::TensorMemory::Mem),
2003            )?;
2004            self.resize_flip_rotate(tmp, &mut tmp2, rotation, flip, crop)?;
2005
2006            // Final conversion to destination (zero-copy into dst)
2007            Self::convert_format_generic(&tmp2, dst)?;
2008        } else {
2009            // Direct conversion (already checked above, but handle edge cases)
2010            Self::convert_format_generic(tmp, dst)?;
2011        }
2012
2013        // Handle destination crop fill if needed
2014        if let (Some(dst_rect), Some(dst_color)) = (crop.dst_rect, crop.dst_color) {
2015            let full_rect = Rect {
2016                left: 0,
2017                top: 0,
2018                width: dst.width(),
2019                height: dst.height(),
2020            };
2021            if dst_rect != full_rect {
2022                Self::fill_image_outside_crop_generic(dst, dst_color, dst_rect)?;
2023            }
2024        }
2025
2026        Ok(())
2027    }
2028
2029    #[cfg(feature = "decoder")]
2030    fn render_to_image(
2031        &mut self,
2032        dst: &mut TensorImage,
2033        detect: &[DetectBox],
2034        segmentation: &[Segmentation],
2035    ) -> Result<()> {
2036        if !matches!(dst.fourcc(), RGBA | RGB) {
2037            return Err(crate::Error::NotSupported(
2038                "CPU image rendering only supports RGBA or RGB images".to_string(),
2039            ));
2040        }
2041
2042        let _timer = FunctionTimer::new("CPUProcessor::render_to_image");
2043
2044        let mut map = dst.tensor.map()?;
2045        let dst_slice = map.as_mut_slice();
2046
2047        self.render_box(dst, dst_slice, detect)?;
2048
2049        if segmentation.is_empty() {
2050            return Ok(());
2051        }
2052
2053        let is_modelpack = segmentation[0].segmentation.shape()[2] > 1;
2054
2055        if is_modelpack {
2056            self.render_modelpack_segmentation(dst, dst_slice, &segmentation[0])?;
2057        } else {
2058            for (seg, detect) in segmentation.iter().zip(detect) {
2059                self.render_yolo_segmentation(dst, dst_slice, seg, detect.label)?;
2060            }
2061        }
2062
2063        Ok(())
2064    }
2065
2066    #[cfg(feature = "decoder")]
2067    fn set_class_colors(&mut self, colors: &[[u8; 4]]) -> Result<()> {
2068        for (c, new_c) in self.colors.iter_mut().zip(colors.iter()) {
2069            *c = *new_c;
2070        }
2071        Ok(())
2072    }
2073}
2074
2075#[cfg(test)]
2076#[cfg_attr(coverage_nightly, coverage(off))]
2077mod cpu_tests {
2078
2079    use super::*;
2080    use crate::{CPUProcessor, Rotation, TensorImageRef, RGBA};
2081    use edgefirst_tensor::{Tensor, TensorMapTrait, TensorMemory};
2082    use image::buffer::ConvertBuffer;
2083
    /// Expands to the name of the enclosing function as a `&'static str`,
    /// with the module path stripped. Used to label failure artifacts
    /// produced by the image-comparison helpers.
    macro_rules! function {
        () => {{
            fn f() {}
            fn type_name_of<T>(_: T) -> &'static str {
                std::any::type_name::<T>()
            }
            // `f` is declared inside the caller, so its type name looks like
            // `path::to::enclosing_fn::f`; the `- 3` below trims the
            // trailing `::f`.
            let name = type_name_of(f);

            // Find and cut the rest of the path
            match &name[..name.len() - 3].rfind(':') {
                Some(pos) => &name[pos + 1..name.len() - 3],
                None => &name[..name.len() - 3],
            }
        }};
    }
2099
    /// Asserts that two images are structurally similar when compared in
    /// greyscale.
    ///
    /// Both inputs are normalized to RGBA via `CPUProcessor::convert_format`,
    /// then scored with an RMS grey-similarity metric. When the score falls
    /// below `threshold` this panics (failing the test) and saves a
    /// color-mapped difference image as `{name}.png` for inspection.
    fn compare_images_convert_to_grey(
        img1: &TensorImage,
        img2: &TensorImage,
        threshold: f64,
        name: &str,
    ) {
        assert_eq!(img1.height(), img2.height(), "Heights differ");
        assert_eq!(img1.width(), img2.width(), "Widths differ");

        // Normalize both inputs to RGBA so any supported format can be
        // compared on equal footing.
        let mut img_rgb1 = TensorImage::new(img1.width(), img1.height(), RGBA, None).unwrap();
        let mut img_rgb2 = TensorImage::new(img1.width(), img1.height(), RGBA, None).unwrap();
        CPUProcessor::convert_format(img1, &mut img_rgb1).unwrap();
        CPUProcessor::convert_format(img2, &mut img_rgb2).unwrap();

        let image1 = image::RgbaImage::from_vec(
            img_rgb1.width() as u32,
            img_rgb1.height() as u32,
            img_rgb1.tensor().map().unwrap().to_vec(),
        )
        .unwrap();

        let image2 = image::RgbaImage::from_vec(
            img_rgb2.width() as u32,
            img_rgb2.height() as u32,
            img_rgb2.tensor().map().unwrap().to_vec(),
        )
        .unwrap();

        // `.convert()` turns the RGBA buffers into greyscale for the metric.
        let similarity = image_compare::gray_similarity_structure(
            &image_compare::Algorithm::RootMeanSquared,
            &image1.convert(),
            &image2.convert(),
        )
        .expect("Image Comparison failed");
        if similarity.score < threshold {
            // image1.save(format!("{name}_1.png"));
            // image2.save(format!("{name}_2.png"));
            similarity
                .image
                .to_color_map()
                .save(format!("{name}.png"))
                .unwrap();
            panic!(
                "{name}: converted image and target image have similarity score too low: {} < {}",
                similarity.score, threshold
            )
        }
    }
2148
    /// Asserts that two images are structurally similar when compared in RGB.
    ///
    /// Both inputs are normalized to RGB via `CPUProcessor::convert_format`,
    /// then scored with an RMS RGB-similarity metric. When the score falls
    /// below `threshold` this panics (failing the test) and saves a
    /// color-mapped difference image as `{name}.png` for inspection.
    fn compare_images_convert_to_rgb(
        img1: &TensorImage,
        img2: &TensorImage,
        threshold: f64,
        name: &str,
    ) {
        assert_eq!(img1.height(), img2.height(), "Heights differ");
        assert_eq!(img1.width(), img2.width(), "Widths differ");

        // Normalize both inputs to RGB so any supported format can be
        // compared on equal footing.
        let mut img_rgb1 = TensorImage::new(img1.width(), img1.height(), RGB, None).unwrap();
        let mut img_rgb2 = TensorImage::new(img1.width(), img1.height(), RGB, None).unwrap();
        CPUProcessor::convert_format(img1, &mut img_rgb1).unwrap();
        CPUProcessor::convert_format(img2, &mut img_rgb2).unwrap();

        let image1 = image::RgbImage::from_vec(
            img_rgb1.width() as u32,
            img_rgb1.height() as u32,
            img_rgb1.tensor().map().unwrap().to_vec(),
        )
        .unwrap();

        let image2 = image::RgbImage::from_vec(
            img_rgb2.width() as u32,
            img_rgb2.height() as u32,
            img_rgb2.tensor().map().unwrap().to_vec(),
        )
        .unwrap();

        let similarity = image_compare::rgb_similarity_structure(
            &image_compare::Algorithm::RootMeanSquared,
            &image1,
            &image2,
        )
        .expect("Image Comparison failed");
        if similarity.score < threshold {
            // image1.save(format!("{name}_1.png"));
            // image2.save(format!("{name}_2.png"));
            similarity
                .image
                .to_color_map()
                .save(format!("{name}.png"))
                .unwrap();
            panic!(
                "{name}: converted image and target image have similarity score too low: {} < {}",
                similarity.score, threshold
            )
        }
    }
2197
    /// Creates a `TensorImage` with the given geometry/format/memory type and
    /// copies the raw fixture `bytes` into the start of its buffer.
    ///
    /// Panics (via slice indexing) if `bytes` is larger than the tensor's
    /// buffer; any trailing buffer space beyond `bytes.len()` is left as-is.
    fn load_bytes_to_tensor(
        width: usize,
        height: usize,
        fourcc: FourCharCode,
        memory: Option<TensorMemory>,
        bytes: &[u8],
    ) -> Result<TensorImage, Error> {
        log::debug!("Current function is {}", function!());
        let src = TensorImage::new(width, height, fourcc, memory)?;
        src.tensor().map()?.as_mut_slice()[0..bytes.len()].copy_from_slice(bytes);
        Ok(src)
    }
2210
    /// Test body for format-conversion round trips: loads a 1280x720 source
    /// fixture and reference fixture from `testdata/`, converts the source to
    /// the destination format with a default `CPUProcessor` (no
    /// rotation/flip/crop), and asserts an RGB structural-similarity score of
    /// at least 0.99 against the reference.
    macro_rules! generate_conversion_tests {
        (
        $src_fmt:ident,  $src_file:expr, $dst_fmt:ident, $dst_file:expr
    ) => {{
            // Load source
            let src = load_bytes_to_tensor(
                1280,
                720,
                $src_fmt,
                None,
                include_bytes!(concat!("../../../testdata/", $src_file)),
            )?;

            // Load destination reference
            let dst = load_bytes_to_tensor(
                1280,
                720,
                $dst_fmt,
                None,
                include_bytes!(concat!("../../../testdata/", $dst_file)),
            )?;

            let mut converter = CPUProcessor::default();

            let mut converted = TensorImage::new(src.width(), src.height(), dst.fourcc(), None)?;

            converter.convert(
                &src,
                &mut converted,
                Rotation::None,
                Flip::None,
                Crop::default(),
            )?;

            compare_images_convert_to_rgb(&dst, &converted, 0.99, function!());

            Ok(())
        }};
    }
2250
    /// Same as `generate_conversion_tests!` but compares in greyscale with a
    /// slightly looser 0.985 threshold — used for GREY sources, where chroma
    /// cannot be reconstructed and an RGB comparison would be meaningless.
    macro_rules! generate_conversion_tests_greyscale {
        (
        $src_fmt:ident,  $src_file:expr, $dst_fmt:ident, $dst_file:expr
    ) => {{
            // Load source
            let src = load_bytes_to_tensor(
                1280,
                720,
                $src_fmt,
                None,
                include_bytes!(concat!("../../../testdata/", $src_file)),
            )?;

            // Load destination reference
            let dst = load_bytes_to_tensor(
                1280,
                720,
                $dst_fmt,
                None,
                include_bytes!(concat!("../../../testdata/", $dst_file)),
            )?;

            let mut converter = CPUProcessor::default();

            let mut converted = TensorImage::new(src.width(), src.height(), dst.fourcc(), None)?;

            converter.convert(
                &src,
                &mut converted,
                Rotation::None,
                Flip::None,
                Crop::default(),
            )?;

            compare_images_convert_to_grey(&dst, &converted, 0.985, function!());

            Ok(())
        }};
    }
2290
2291    // let mut dsts = [yuyv, rgb, rgba, grey, nv16, planar_rgb, planar_rgba];
2292
2293    #[test]
2294    fn test_cpu_yuyv_to_yuyv() -> Result<()> {
2295        generate_conversion_tests!(YUYV, "camera720p.yuyv", YUYV, "camera720p.yuyv")
2296    }
2297
2298    #[test]
2299    fn test_cpu_yuyv_to_rgb() -> Result<()> {
2300        generate_conversion_tests!(YUYV, "camera720p.yuyv", RGB, "camera720p.rgb")
2301    }
2302
2303    #[test]
2304    fn test_cpu_yuyv_to_rgba() -> Result<()> {
2305        generate_conversion_tests!(YUYV, "camera720p.yuyv", RGBA, "camera720p.rgba")
2306    }
2307
2308    #[test]
2309    fn test_cpu_yuyv_to_grey() -> Result<()> {
2310        generate_conversion_tests!(YUYV, "camera720p.yuyv", GREY, "camera720p.y800")
2311    }
2312
2313    #[test]
2314    fn test_cpu_yuyv_to_nv16() -> Result<()> {
2315        generate_conversion_tests!(YUYV, "camera720p.yuyv", NV16, "camera720p.nv16")
2316    }
2317
2318    #[test]
2319    fn test_cpu_yuyv_to_planar_rgb() -> Result<()> {
2320        generate_conversion_tests!(YUYV, "camera720p.yuyv", PLANAR_RGB, "camera720p.8bps")
2321    }
2322
2323    #[test]
2324    fn test_cpu_yuyv_to_planar_rgba() -> Result<()> {
2325        generate_conversion_tests!(YUYV, "camera720p.yuyv", PLANAR_RGBA, "camera720p.8bpa")
2326    }
2327
2328    #[test]
2329    fn test_cpu_rgb_to_yuyv() -> Result<()> {
2330        generate_conversion_tests!(RGB, "camera720p.rgb", YUYV, "camera720p.yuyv")
2331    }
2332
2333    #[test]
2334    fn test_cpu_rgb_to_rgb() -> Result<()> {
2335        generate_conversion_tests!(RGB, "camera720p.rgb", RGB, "camera720p.rgb")
2336    }
2337
2338    #[test]
2339    fn test_cpu_rgb_to_rgba() -> Result<()> {
2340        generate_conversion_tests!(RGB, "camera720p.rgb", RGBA, "camera720p.rgba")
2341    }
2342
2343    #[test]
2344    fn test_cpu_rgb_to_grey() -> Result<()> {
2345        generate_conversion_tests!(RGB, "camera720p.rgb", GREY, "camera720p.y800")
2346    }
2347
2348    #[test]
2349    fn test_cpu_rgb_to_nv16() -> Result<()> {
2350        generate_conversion_tests!(RGB, "camera720p.rgb", NV16, "camera720p.nv16")
2351    }
2352
2353    #[test]
2354    fn test_cpu_rgb_to_planar_rgb() -> Result<()> {
2355        generate_conversion_tests!(RGB, "camera720p.rgb", PLANAR_RGB, "camera720p.8bps")
2356    }
2357
2358    #[test]
2359    fn test_cpu_rgb_to_planar_rgba() -> Result<()> {
2360        generate_conversion_tests!(RGB, "camera720p.rgb", PLANAR_RGBA, "camera720p.8bpa")
2361    }
2362
2363    #[test]
2364    fn test_cpu_rgba_to_yuyv() -> Result<()> {
2365        generate_conversion_tests!(RGBA, "camera720p.rgba", YUYV, "camera720p.yuyv")
2366    }
2367
2368    #[test]
2369    fn test_cpu_rgba_to_rgb() -> Result<()> {
2370        generate_conversion_tests!(RGBA, "camera720p.rgba", RGB, "camera720p.rgb")
2371    }
2372
2373    #[test]
2374    fn test_cpu_rgba_to_rgba() -> Result<()> {
2375        generate_conversion_tests!(RGBA, "camera720p.rgba", RGBA, "camera720p.rgba")
2376    }
2377
2378    #[test]
2379    fn test_cpu_rgba_to_grey() -> Result<()> {
2380        generate_conversion_tests!(RGBA, "camera720p.rgba", GREY, "camera720p.y800")
2381    }
2382
2383    #[test]
2384    fn test_cpu_rgba_to_nv16() -> Result<()> {
2385        generate_conversion_tests!(RGBA, "camera720p.rgba", NV16, "camera720p.nv16")
2386    }
2387
2388    #[test]
2389    fn test_cpu_rgba_to_planar_rgb() -> Result<()> {
2390        generate_conversion_tests!(RGBA, "camera720p.rgba", PLANAR_RGB, "camera720p.8bps")
2391    }
2392
2393    #[test]
2394    fn test_cpu_rgba_to_planar_rgba() -> Result<()> {
2395        generate_conversion_tests!(RGBA, "camera720p.rgba", PLANAR_RGBA, "camera720p.8bpa")
2396    }
2397
2398    #[test]
2399    fn test_cpu_nv12_to_rgb() -> Result<()> {
2400        generate_conversion_tests!(NV12, "camera720p.nv12", RGB, "camera720p.rgb")
2401    }
2402
2403    #[test]
2404    fn test_cpu_nv12_to_yuyv() -> Result<()> {
2405        generate_conversion_tests!(NV12, "camera720p.nv12", YUYV, "camera720p.yuyv")
2406    }
2407
2408    #[test]
2409    fn test_cpu_nv12_to_rgba() -> Result<()> {
2410        generate_conversion_tests!(NV12, "camera720p.nv12", RGBA, "camera720p.rgba")
2411    }
2412
2413    #[test]
2414    fn test_cpu_nv12_to_grey() -> Result<()> {
2415        generate_conversion_tests!(NV12, "camera720p.nv12", GREY, "camera720p.y800")
2416    }
2417
2418    #[test]
2419    fn test_cpu_nv12_to_nv16() -> Result<()> {
2420        generate_conversion_tests!(NV12, "camera720p.nv12", NV16, "camera720p.nv16")
2421    }
2422
2423    #[test]
2424    fn test_cpu_nv12_to_planar_rgb() -> Result<()> {
2425        generate_conversion_tests!(NV12, "camera720p.nv12", PLANAR_RGB, "camera720p.8bps")
2426    }
2427
2428    #[test]
2429    fn test_cpu_nv12_to_planar_rgba() -> Result<()> {
2430        generate_conversion_tests!(NV12, "camera720p.nv12", PLANAR_RGBA, "camera720p.8bpa")
2431    }
2432
2433    #[test]
2434    fn test_cpu_grey_to_yuyv() -> Result<()> {
2435        generate_conversion_tests_greyscale!(GREY, "camera720p.y800", YUYV, "camera720p.yuyv")
2436    }
2437
2438    #[test]
2439    fn test_cpu_grey_to_rgb() -> Result<()> {
2440        generate_conversion_tests_greyscale!(GREY, "camera720p.y800", RGB, "camera720p.rgb")
2441    }
2442
2443    #[test]
2444    fn test_cpu_grey_to_rgba() -> Result<()> {
2445        generate_conversion_tests_greyscale!(GREY, "camera720p.y800", RGBA, "camera720p.rgba")
2446    }
2447
2448    #[test]
2449    fn test_cpu_grey_to_grey() -> Result<()> {
2450        generate_conversion_tests_greyscale!(GREY, "camera720p.y800", GREY, "camera720p.y800")
2451    }
2452
2453    #[test]
2454    fn test_cpu_grey_to_nv16() -> Result<()> {
2455        generate_conversion_tests_greyscale!(GREY, "camera720p.y800", NV16, "camera720p.nv16")
2456    }
2457
2458    #[test]
2459    fn test_cpu_grey_to_planar_rgb() -> Result<()> {
2460        generate_conversion_tests_greyscale!(GREY, "camera720p.y800", PLANAR_RGB, "camera720p.8bps")
2461    }
2462
2463    #[test]
2464    fn test_cpu_grey_to_planar_rgba() -> Result<()> {
2465        generate_conversion_tests_greyscale!(
2466            GREY,
2467            "camera720p.y800",
2468            PLANAR_RGBA,
2469            "camera720p.8bpa"
2470        )
2471    }
2472
    /// Nearest-neighbour upscale of a 2x1 black/white RGB image to 4x1 must
    /// exactly duplicate each source pixel — no blending between neighbours.
    #[test]
    fn test_cpu_nearest() -> Result<()> {
        // Load source
        let src = load_bytes_to_tensor(2, 1, RGB, None, &[0, 0, 0, 255, 255, 255])?;

        let mut converter = CPUProcessor::new_nearest();

        let mut converted = TensorImage::new(4, 1, RGB, None)?;

        converter.convert(
            &src,
            &mut converted,
            Rotation::None,
            Flip::None,
            Crop::default(),
        )?;

        assert_eq!(
            &converted.tensor().map()?.as_slice(),
            &[0, 0, 0, 0, 0, 0, 255, 255, 255, 255, 255, 255]
        );

        Ok(())
    }
2497
    /// Rotates a 2x2 RGBA image (pixels 0..3 in row-major order) 90 degrees
    /// clockwise while upscaling to 4x4, then spot-checks the four corner
    /// pixels: clockwise rotation moves the bottom-left source pixel (2) to
    /// the top-left, top-left (0) to top-right, bottom-right (3) to
    /// bottom-left, and top-right (1) to bottom-right.
    #[test]
    fn test_cpu_rotate_cw() -> Result<()> {
        // Load source
        let src = load_bytes_to_tensor(
            2,
            2,
            RGBA,
            None,
            &[0, 0, 0, 255, 1, 1, 1, 255, 2, 2, 2, 255, 3, 3, 3, 255],
        )?;

        let mut converter = CPUProcessor::default();

        let mut converted = TensorImage::new(4, 4, RGBA, None)?;

        converter.convert(
            &src,
            &mut converted,
            Rotation::Clockwise90,
            Flip::None,
            Crop::default(),
        )?;

        // Top-left pixel of the 4x4 output.
        assert_eq!(&converted.tensor().map()?.as_slice()[0..4], &[2, 2, 2, 255]);
        // Top-right pixel.
        assert_eq!(
            &converted.tensor().map()?.as_slice()[12..16],
            &[0, 0, 0, 255]
        );
        // Bottom-left pixel.
        assert_eq!(
            &converted.tensor().map()?.as_slice()[48..52],
            &[3, 3, 3, 255]
        );

        // Bottom-right pixel.
        assert_eq!(
            &converted.tensor().map()?.as_slice()[60..64],
            &[1, 1, 1, 255]
        );

        Ok(())
    }
2538
2539    #[test]
2540    fn test_cpu_rotate_ccw() -> Result<()> {
2541        // Load source
2542        let src = load_bytes_to_tensor(
2543            2,
2544            2,
2545            RGBA,
2546            None,
2547            &[0, 0, 0, 255, 1, 1, 1, 255, 2, 2, 2, 255, 3, 3, 3, 255],
2548        )?;
2549
2550        let mut converter = CPUProcessor::default();
2551
2552        let mut converted = TensorImage::new(4, 4, RGBA, None)?;
2553
2554        converter.convert(
2555            &src,
2556            &mut converted,
2557            Rotation::CounterClockwise90,
2558            Flip::None,
2559            Crop::default(),
2560        )?;
2561
2562        assert_eq!(&converted.tensor().map()?.as_slice()[0..4], &[1, 1, 1, 255]);
2563        assert_eq!(
2564            &converted.tensor().map()?.as_slice()[12..16],
2565            &[3, 3, 3, 255]
2566        );
2567        assert_eq!(
2568            &converted.tensor().map()?.as_slice()[48..52],
2569            &[0, 0, 0, 255]
2570        );
2571
2572        assert_eq!(
2573            &converted.tensor().map()?.as_slice()[60..64],
2574            &[2, 2, 2, 255]
2575        );
2576
2577        Ok(())
2578    }
2579
2580    #[test]
2581    fn test_cpu_rotate_180() -> Result<()> {
2582        // Load source
2583        let src = load_bytes_to_tensor(
2584            2,
2585            2,
2586            RGBA,
2587            None,
2588            &[0, 0, 0, 255, 1, 1, 1, 255, 2, 2, 2, 255, 3, 3, 3, 255],
2589        )?;
2590
2591        let mut converter = CPUProcessor::default();
2592
2593        let mut converted = TensorImage::new(4, 4, RGBA, None)?;
2594
2595        converter.convert(
2596            &src,
2597            &mut converted,
2598            Rotation::Rotate180,
2599            Flip::None,
2600            Crop::default(),
2601        )?;
2602
2603        assert_eq!(&converted.tensor().map()?.as_slice()[0..4], &[3, 3, 3, 255]);
2604        assert_eq!(
2605            &converted.tensor().map()?.as_slice()[12..16],
2606            &[2, 2, 2, 255]
2607        );
2608        assert_eq!(
2609            &converted.tensor().map()?.as_slice()[48..52],
2610            &[1, 1, 1, 255]
2611        );
2612
2613        assert_eq!(
2614            &converted.tensor().map()?.as_slice()[60..64],
2615            &[0, 0, 0, 255]
2616        );
2617
2618        Ok(())
2619    }
2620
2621    #[test]
2622    fn test_cpu_flip_v() -> Result<()> {
2623        // Load source
2624        let src = load_bytes_to_tensor(
2625            2,
2626            2,
2627            RGBA,
2628            None,
2629            &[0, 0, 0, 255, 1, 1, 1, 255, 2, 2, 2, 255, 3, 3, 3, 255],
2630        )?;
2631
2632        let mut converter = CPUProcessor::default();
2633
2634        let mut converted = TensorImage::new(4, 4, RGBA, None)?;
2635
2636        converter.convert(
2637            &src,
2638            &mut converted,
2639            Rotation::None,
2640            Flip::Vertical,
2641            Crop::default(),
2642        )?;
2643
2644        assert_eq!(&converted.tensor().map()?.as_slice()[0..4], &[2, 2, 2, 255]);
2645        assert_eq!(
2646            &converted.tensor().map()?.as_slice()[12..16],
2647            &[3, 3, 3, 255]
2648        );
2649        assert_eq!(
2650            &converted.tensor().map()?.as_slice()[48..52],
2651            &[0, 0, 0, 255]
2652        );
2653
2654        assert_eq!(
2655            &converted.tensor().map()?.as_slice()[60..64],
2656            &[1, 1, 1, 255]
2657        );
2658
2659        Ok(())
2660    }
2661
2662    #[test]
2663    fn test_cpu_flip_h() -> Result<()> {
2664        // Load source
2665        let src = load_bytes_to_tensor(
2666            2,
2667            2,
2668            RGBA,
2669            None,
2670            &[0, 0, 0, 255, 1, 1, 1, 255, 2, 2, 2, 255, 3, 3, 3, 255],
2671        )?;
2672
2673        let mut converter = CPUProcessor::default();
2674
2675        let mut converted = TensorImage::new(4, 4, RGBA, None)?;
2676
2677        converter.convert(
2678            &src,
2679            &mut converted,
2680            Rotation::None,
2681            Flip::Horizontal,
2682            Crop::default(),
2683        )?;
2684
2685        assert_eq!(&converted.tensor().map()?.as_slice()[0..4], &[1, 1, 1, 255]);
2686        assert_eq!(
2687            &converted.tensor().map()?.as_slice()[12..16],
2688            &[0, 0, 0, 255]
2689        );
2690        assert_eq!(
2691            &converted.tensor().map()?.as_slice()[48..52],
2692            &[3, 3, 3, 255]
2693        );
2694
2695        assert_eq!(
2696            &converted.tensor().map()?.as_slice()[60..64],
2697            &[2, 2, 2, 255]
2698        );
2699
2700        Ok(())
2701    }
2702
2703    #[test]
2704    fn test_cpu_src_crop() -> Result<()> {
2705        // Load source
2706        let src = load_bytes_to_tensor(2, 2, GREY, None, &[10, 20, 30, 40])?;
2707
2708        let mut converter = CPUProcessor::default();
2709
2710        let mut converted = TensorImage::new(2, 2, RGBA, None)?;
2711
2712        converter.convert(
2713            &src,
2714            &mut converted,
2715            Rotation::None,
2716            Flip::None,
2717            Crop::new().with_src_rect(Some(Rect::new(0, 0, 1, 2))),
2718        )?;
2719
2720        assert_eq!(
2721            converted.tensor().map()?.as_slice(),
2722            &[10, 10, 10, 255, 13, 13, 13, 255, 30, 30, 30, 255, 33, 33, 33, 255]
2723        );
2724        Ok(())
2725    }
2726
2727    #[test]
2728    fn test_cpu_dst_crop() -> Result<()> {
2729        // Load source
2730        let src = load_bytes_to_tensor(2, 2, GREY, None, &[2, 4, 6, 8])?;
2731
2732        let mut converter = CPUProcessor::default();
2733
2734        let mut converted =
2735            load_bytes_to_tensor(2, 2, YUYV, None, &[200, 128, 200, 128, 200, 128, 200, 128])?;
2736
2737        converter.convert(
2738            &src,
2739            &mut converted,
2740            Rotation::None,
2741            Flip::None,
2742            Crop::new().with_dst_rect(Some(Rect::new(0, 0, 2, 1))),
2743        )?;
2744
2745        assert_eq!(
2746            converted.tensor().map()?.as_slice(),
2747            &[20, 128, 21, 128, 200, 128, 200, 128]
2748        );
2749        Ok(())
2750    }
2751
2752    #[test]
2753    fn test_cpu_fill_rgba() -> Result<()> {
2754        // Load source
2755        let src = load_bytes_to_tensor(1, 1, RGBA, None, &[3, 3, 3, 255])?;
2756
2757        let mut converter = CPUProcessor::default();
2758
2759        let mut converted = TensorImage::new(2, 2, RGBA, None)?;
2760
2761        converter.convert(
2762            &src,
2763            &mut converted,
2764            Rotation::None,
2765            Flip::None,
2766            Crop {
2767                src_rect: None,
2768                dst_rect: Some(Rect {
2769                    left: 1,
2770                    top: 1,
2771                    width: 1,
2772                    height: 1,
2773                }),
2774                dst_color: Some([255, 0, 0, 255]),
2775            },
2776        )?;
2777
2778        assert_eq!(
2779            converted.tensor().map()?.as_slice(),
2780            &[255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 3, 3, 3, 255]
2781        );
2782        Ok(())
2783    }
2784
2785    #[test]
2786    fn test_cpu_fill_yuyv() -> Result<()> {
2787        // Load source
2788        let src = load_bytes_to_tensor(2, 1, RGBA, None, &[3, 3, 3, 255, 3, 3, 3, 255])?;
2789
2790        let mut converter = CPUProcessor::default();
2791
2792        let mut converted = TensorImage::new(2, 3, YUYV, None)?;
2793
2794        converter.convert(
2795            &src,
2796            &mut converted,
2797            Rotation::None,
2798            Flip::None,
2799            Crop {
2800                src_rect: None,
2801                dst_rect: Some(Rect {
2802                    left: 0,
2803                    top: 1,
2804                    width: 2,
2805                    height: 1,
2806                }),
2807                dst_color: Some([255, 0, 0, 255]),
2808            },
2809        )?;
2810
2811        assert_eq!(
2812            converted.tensor().map()?.as_slice(),
2813            &[63, 102, 63, 240, 19, 128, 19, 128, 63, 102, 63, 240]
2814        );
2815        Ok(())
2816    }
2817
2818    #[test]
2819    fn test_cpu_fill_grey() -> Result<()> {
2820        // Load source
2821        let src = load_bytes_to_tensor(2, 1, RGBA, None, &[3, 3, 3, 255, 3, 3, 3, 255])?;
2822
2823        let mut converter = CPUProcessor::default();
2824
2825        let mut converted = TensorImage::new(2, 3, GREY, None)?;
2826
2827        converter.convert(
2828            &src,
2829            &mut converted,
2830            Rotation::None,
2831            Flip::None,
2832            Crop {
2833                src_rect: None,
2834                dst_rect: Some(Rect {
2835                    left: 0,
2836                    top: 1,
2837                    width: 2,
2838                    height: 1,
2839                }),
2840                dst_color: Some([200, 200, 200, 255]),
2841            },
2842        )?;
2843
2844        assert_eq!(
2845            converted.tensor().map()?.as_slice(),
2846            &[200, 200, 3, 3, 200, 200]
2847        );
2848        Ok(())
2849    }
2850
    #[test]
    #[cfg(feature = "decoder")]
    fn test_segmentation() {
        use edgefirst_decoder::Segmentation;
        use ndarray::Array3;

        // Giraffe test asset decoded straight to RGBA.
        let mut image = TensorImage::load(
            include_bytes!("../../../testdata/giraffe.jpg"),
            Some(RGBA),
            None,
        )
        .unwrap();

        // The reference mask is stored channel-first (2 x 160 x 160); swap
        // axes to height-width-channel order, then force a contiguous
        // standard layout before handing it to the renderer.
        let mut segmentation = Array3::from_shape_vec(
            (2, 160, 160),
            include_bytes!("../../../testdata/modelpack_seg_2x160x160.bin").to_vec(),
        )
        .unwrap();
        segmentation.swap_axes(0, 1);
        segmentation.swap_axes(1, 2);
        let segmentation = segmentation.as_standard_layout().to_owned();

        // Full-frame mask: the normalized extent spans the whole image.
        let seg = Segmentation {
            segmentation,
            xmin: 0.0,
            ymin: 0.0,
            xmax: 1.0,
            ymax: 1.0,
        };

        let mut renderer = CPUProcessor::new();
        renderer.render_to_image(&mut image, &[], &[seg]).unwrap();

        // NOTE(review): no pixel comparison here — the test only verifies
        // that rendering completes without panicking and writes the result
        // for manual inspection.
        image.save_jpeg("test_segmentation.jpg", 80).unwrap();
    }
2886
    #[test]
    #[cfg(feature = "decoder")]
    fn test_segmentation_yolo() {
        use edgefirst_decoder::Segmentation;
        use ndarray::Array3;

        // Giraffe test asset decoded straight to RGBA.
        let mut image = TensorImage::load(
            include_bytes!("../../../testdata/giraffe.jpg"),
            Some(RGBA),
            None,
        )
        .unwrap();

        // YOLOv8-style crop mask (76 x 55 x 1) that covers only the
        // detection box, not the full frame.
        let segmentation = Array3::from_shape_vec(
            (76, 55, 1),
            include_bytes!("../../../testdata/yolov8_seg_crop_76x55.bin").to_vec(),
        )
        .unwrap();

        // Detection in normalized coordinates; label 1 selects the second
        // entry of the custom palette installed below.
        let detect = DetectBox {
            bbox: [0.59375, 0.25, 0.9375, 0.725].into(),
            score: 0.99,
            label: 1,
        };

        // The mask's normalized extent matches the detection box.
        let seg = Segmentation {
            segmentation,
            xmin: 0.59375,
            ymin: 0.25,
            xmax: 0.9375,
            ymax: 0.725,
        };

        let mut renderer = CPUProcessor::new();
        renderer
            .set_class_colors(&[[255, 255, 0, 233], [128, 128, 255, 100]])
            .unwrap();
        // Sanity-check that the palette actually replaced the defaults.
        assert_eq!(renderer.colors[1], [128, 128, 255, 100]);
        renderer
            .render_to_image(&mut image, &[detect], &[seg])
            .unwrap();
        // Compare against the pre-rendered reference at 0.99 similarity.
        let expected = TensorImage::load(
            include_bytes!("../../../testdata/output_render_cpu.jpg"),
            Some(RGBA),
            None,
        )
        .unwrap();
        compare_images_convert_to_rgb(&image, &expected, 0.99, function!());
    }
2936
2937    // =========================================================================
2938    // Generic Conversion Tests (TensorImageRef support)
2939    // =========================================================================
2940
2941    #[test]
2942    fn test_convert_rgb_to_planar_rgb_generic() {
2943        // Create RGB source image
2944        let mut src = TensorImage::new(4, 4, RGB, None).unwrap();
2945        {
2946            let mut map = src.tensor_mut().map().unwrap();
2947            let data = map.as_mut_slice();
2948            // Fill with pattern: pixel 0 = [10, 20, 30], pixel 1 = [40, 50, 60], etc.
2949            for i in 0..16 {
2950                data[i * 3] = (i * 10) as u8;
2951                data[i * 3 + 1] = (i * 10 + 1) as u8;
2952                data[i * 3 + 2] = (i * 10 + 2) as u8;
2953            }
2954        }
2955
2956        // Create planar RGB destination using TensorImageRef
2957        let mut tensor = Tensor::<u8>::new(&[3, 4, 4], None, None).unwrap();
2958        let mut dst = TensorImageRef::from_borrowed_tensor(&mut tensor, PLANAR_RGB).unwrap();
2959
2960        CPUProcessor::convert_format_generic(&src, &mut dst).unwrap();
2961
2962        // Verify the conversion - check first few pixels of each plane
2963        let map = dst.tensor().map().unwrap();
2964        let data = map.as_slice();
2965
2966        // R plane starts at 0, G at 16, B at 32
2967        assert_eq!(data[0], 0); // R of pixel 0
2968        assert_eq!(data[16], 1); // G of pixel 0
2969        assert_eq!(data[32], 2); // B of pixel 0
2970
2971        assert_eq!(data[1], 10); // R of pixel 1
2972        assert_eq!(data[17], 11); // G of pixel 1
2973        assert_eq!(data[33], 12); // B of pixel 1
2974    }
2975
2976    #[test]
2977    fn test_convert_rgba_to_planar_rgb_generic() {
2978        // Create RGBA source image
2979        let mut src = TensorImage::new(4, 4, RGBA, None).unwrap();
2980        {
2981            let mut map = src.tensor_mut().map().unwrap();
2982            let data = map.as_mut_slice();
2983            // Fill with pattern
2984            for i in 0..16 {
2985                data[i * 4] = (i * 10) as u8; // R
2986                data[i * 4 + 1] = (i * 10 + 1) as u8; // G
2987                data[i * 4 + 2] = (i * 10 + 2) as u8; // B
2988                data[i * 4 + 3] = 255; // A (ignored)
2989            }
2990        }
2991
2992        // Create planar RGB destination
2993        let mut tensor = Tensor::<u8>::new(&[3, 4, 4], None, None).unwrap();
2994        let mut dst = TensorImageRef::from_borrowed_tensor(&mut tensor, PLANAR_RGB).unwrap();
2995
2996        CPUProcessor::convert_format_generic(&src, &mut dst).unwrap();
2997
2998        // Verify the conversion
2999        let map = dst.tensor().map().unwrap();
3000        let data = map.as_slice();
3001
3002        assert_eq!(data[0], 0); // R of pixel 0
3003        assert_eq!(data[16], 1); // G of pixel 0
3004        assert_eq!(data[32], 2); // B of pixel 0
3005    }
3006
3007    #[test]
3008    fn test_copy_image_generic_same_format() {
3009        // Create source image with data
3010        let mut src = TensorImage::new(4, 4, RGB, None).unwrap();
3011        {
3012            let mut map = src.tensor_mut().map().unwrap();
3013            let data = map.as_mut_slice();
3014            for (i, byte) in data.iter_mut().enumerate() {
3015                *byte = (i % 256) as u8;
3016            }
3017        }
3018
3019        // Create destination tensor
3020        let mut tensor = Tensor::<u8>::new(&[4, 4, 3], None, None).unwrap();
3021        let mut dst = TensorImageRef::from_borrowed_tensor(&mut tensor, RGB).unwrap();
3022
3023        CPUProcessor::convert_format_generic(&src, &mut dst).unwrap();
3024
3025        // Verify data was copied
3026        let src_map = src.tensor().map().unwrap();
3027        let dst_map = dst.tensor().map().unwrap();
3028        assert_eq!(src_map.as_slice(), dst_map.as_slice());
3029    }
3030
3031    #[test]
3032    fn test_convert_format_generic_unsupported() {
3033        // Try unsupported conversion (NV12 to PLANAR_RGB)
3034        let src = TensorImage::new(8, 8, NV12, None).unwrap();
3035        let mut tensor = Tensor::<u8>::new(&[3, 8, 8], None, None).unwrap();
3036        let mut dst = TensorImageRef::from_borrowed_tensor(&mut tensor, PLANAR_RGB).unwrap();
3037
3038        let result = CPUProcessor::convert_format_generic(&src, &mut dst);
3039        assert!(result.is_err());
3040        assert!(matches!(result, Err(Error::NotSupported(_))));
3041    }
3042
3043    #[test]
3044    fn test_fill_image_outside_crop_generic_rgba() {
3045        let mut tensor = Tensor::<u8>::new(&[4, 4, 4], None, None).unwrap();
3046        // Initialize to zeros
3047        tensor.map().unwrap().as_mut_slice().fill(0);
3048
3049        let mut dst = TensorImageRef::from_borrowed_tensor(&mut tensor, RGBA).unwrap();
3050
3051        // Fill outside a 2x2 crop in the center with red
3052        let crop = Rect::new(1, 1, 2, 2);
3053        CPUProcessor::fill_image_outside_crop_generic(&mut dst, [255, 0, 0, 255], crop).unwrap();
3054
3055        let map = dst.tensor().map().unwrap();
3056        let data = map.as_slice();
3057
3058        // Top-left corner should be filled (red)
3059        assert_eq!(&data[0..4], &[255, 0, 0, 255]);
3060
3061        // Center pixel (1,1) should still be zero (inside crop)
3062        // row=1, col=1, width=4, bytes_per_pixel=4 -> offset = (1*4 + 1) * 4 = 20
3063        let center_offset = 20;
3064        assert_eq!(&data[center_offset..center_offset + 4], &[0, 0, 0, 0]);
3065    }
3066
3067    #[test]
3068    fn test_fill_image_outside_crop_generic_rgb() {
3069        let mut tensor = Tensor::<u8>::new(&[4, 4, 3], None, None).unwrap();
3070        tensor.map().unwrap().as_mut_slice().fill(0);
3071
3072        let mut dst = TensorImageRef::from_borrowed_tensor(&mut tensor, RGB).unwrap();
3073
3074        let crop = Rect::new(1, 1, 2, 2);
3075        CPUProcessor::fill_image_outside_crop_generic(&mut dst, [0, 255, 0, 255], crop).unwrap();
3076
3077        let map = dst.tensor().map().unwrap();
3078        let data = map.as_slice();
3079
3080        // Top-left corner should be green
3081        assert_eq!(&data[0..3], &[0, 255, 0]);
3082
3083        // Center pixel (1,1): row=1, col=1, width=4, bytes=3 -> offset = (1*4 + 1) * 3
3084        // = 15
3085        let center_offset = 15;
3086        assert_eq!(&data[center_offset..center_offset + 3], &[0, 0, 0]);
3087    }
3088
3089    #[test]
3090    fn test_fill_image_outside_crop_generic_planar_rgb() {
3091        let mut tensor = Tensor::<u8>::new(&[3, 4, 4], None, None).unwrap();
3092        tensor.map().unwrap().as_mut_slice().fill(0);
3093
3094        let mut dst = TensorImageRef::from_borrowed_tensor(&mut tensor, PLANAR_RGB).unwrap();
3095
3096        let crop = Rect::new(1, 1, 2, 2);
3097        CPUProcessor::fill_image_outside_crop_generic(&mut dst, [128, 64, 32, 255], crop).unwrap();
3098
3099        let map = dst.tensor().map().unwrap();
3100        let data = map.as_slice();
3101
3102        // For planar: R plane is [0..16], G plane is [16..32], B plane is [32..48]
3103        // Top-left pixel (0,0) should have R=128, G=64, B=32
3104        assert_eq!(data[0], 128); // R plane, pixel 0
3105        assert_eq!(data[16], 64); // G plane, pixel 0
3106        assert_eq!(data[32], 32); // B plane, pixel 0
3107
3108        // Center pixel (1,1): row=1, col=1, width=4 -> index = 1*4 + 1 = 5
3109        let center_idx = 5;
3110        assert_eq!(data[center_idx], 0); // R
3111        assert_eq!(data[16 + center_idx], 0); // G
3112        assert_eq!(data[32 + center_idx], 0); // B
3113    }
3114}