// edgefirst_image/cpu.rs

1// SPDX-FileCopyrightText: Copyright 2025 Au-Zone Technologies
2// SPDX-License-Identifier: Apache-2.0
3
4use crate::{
5    Crop, Error, Flip, FunctionTimer, ImageProcessorTrait, Rect, Result, Rotation, TensorImage,
6    TensorImageDst, TensorImageRef, GREY, NV12, NV16, PLANAR_RGB, PLANAR_RGBA, RGB, RGBA, YUYV,
7};
8#[cfg(feature = "decoder")]
9use edgefirst_decoder::{DetectBox, Segmentation};
10use edgefirst_tensor::{TensorMapTrait, TensorTrait};
11use four_char_code::FourCharCode;
12use ndarray::{ArrayView3, ArrayViewMut3, Axis};
13use rayon::iter::{
14    IndexedParallelIterator, IntoParallelRefIterator, IntoParallelRefMutIterator, ParallelIterator,
15};
16use std::ops::Shr;
17
/// CPUProcessor implements the `ImageProcessorTrait` using the fallback CPU
/// implementation for image processing (no GPU or hardware acceleration).
#[derive(Debug, Clone)]
pub struct CPUProcessor {
    // Reusable resizer; the algorithm (bilinear or nearest) is chosen at
    // construction time via the options below.
    resizer: fast_image_resize::Resizer,
    options: fast_image_resize::ResizeOptions,
    // NOTE(review): presumably per-class overlay colors (seeded from
    // DEFAULT_COLORS_U8) used when rendering decoder output — usage is not
    // visible in this chunk; confirm against the rest of the file.
    #[cfg(feature = "decoder")]
    colors: [[u8; 4]; 20],
}
27
// SAFETY: asserts that CPUProcessor — including the contained
// fast_image_resize::Resizer and ResizeOptions — can be moved to and shared
// between threads. NOTE(review): nothing in this chunk demonstrates why
// Resizer is thread-safe; confirm against fast_image_resize's documentation
// before relying on these impls.
unsafe impl Send for CPUProcessor {}
unsafe impl Sync for CPUProcessor {}
30
#[inline(always)]
/// Expands a limited-range luma sample [16, 240] to full range [0, 255]
/// with round-to-nearest: y = (l - 16) * 255 / 224.
///
/// Out-of-range inputs are clamped: the previous `l as u16 - 16` underflowed
/// for l < 16 (panic in debug builds, garbage in release) and the final
/// `as u8` cast silently wrapped for l > 240.
fn limit_to_full(l: u8) -> u8 {
    let l = (l as u16).saturating_sub(16);
    // Max intermediate: 239 * 255 + 112 = 61_057, safely within u16.
    ((l * 255 + (240 - 16) / 2) / (240 - 16)).min(255) as u8
}
35
#[inline(always)]
/// Compresses a full-range sample [0, 255] to the limited range [16, 240]:
/// y = l * 224 / 255 + 16, with +127 for round-to-nearest before dividing.
fn full_to_limit(l: u8) -> u8 {
    let scaled = (l as u16 * (240 - 16) + 255 / 2) / 255;
    (scaled + 16) as u8
}
40
impl Default for CPUProcessor {
    /// Returns a bilinear-resizing processor, identical to `CPUProcessor::new`.
    fn default() -> Self {
        Self::new_bilinear()
    }
}
46
47impl CPUProcessor {
    /// Creates a new CPUConverter with bilinear resizing.
    ///
    /// Use [`CPUProcessor::new_nearest`] for nearest-neighbor resizing instead.
    pub fn new() -> Self {
        Self::new_bilinear()
    }
52
53    /// Creates a new CPUConverter with bilinear resizing.
54    fn new_bilinear() -> Self {
55        let resizer = fast_image_resize::Resizer::new();
56        let options = fast_image_resize::ResizeOptions::new()
57            .resize_alg(fast_image_resize::ResizeAlg::Convolution(
58                fast_image_resize::FilterType::Bilinear,
59            ))
60            .use_alpha(false);
61
62        log::debug!("CPUConverter created");
63        Self {
64            resizer,
65            options,
66            #[cfg(feature = "decoder")]
67            colors: crate::DEFAULT_COLORS_U8,
68        }
69    }
70
71    /// Creates a new CPUConverter with nearest neighbor resizing.
72    pub fn new_nearest() -> Self {
73        let resizer = fast_image_resize::Resizer::new();
74        let options = fast_image_resize::ResizeOptions::new()
75            .resize_alg(fast_image_resize::ResizeAlg::Nearest)
76            .use_alpha(false);
77        log::debug!("CPUConverter created");
78        Self {
79            resizer,
80            options,
81            #[cfg(feature = "decoder")]
82            colors: crate::DEFAULT_COLORS_U8,
83        }
84    }
85
    /// Copies `src_map` into `dst_map`, applying `flip` first and then
    /// `rotation`, using zero-copy ndarray view manipulation (axis swaps and
    /// inversions) so that only the final `assign` moves pixel data.
    ///
    /// `dst` supplies the output geometry; for 90° rotations the source
    /// buffer is interpreted with transposed dimensions (width × height of
    /// the destination swapped).
    pub(crate) fn flip_rotate_ndarray(
        src_map: &[u8],
        dst_map: &mut [u8],
        dst: &TensorImage,
        rotation: Rotation,
        flip: Flip,
    ) -> Result<(), crate::Error> {
        let mut dst_view =
            ArrayViewMut3::from_shape((dst.height(), dst.width(), dst.channels()), dst_map)?;
        // The source view's shape depends on the rotation: a 90° rotation in
        // either direction swaps width and height relative to the output.
        let mut src_view = match rotation {
            Rotation::None | Rotation::Rotate180 => {
                ArrayView3::from_shape((dst.height(), dst.width(), dst.channels()), src_map)?
            }
            Rotation::Clockwise90 | Rotation::CounterClockwise90 => {
                ArrayView3::from_shape((dst.width(), dst.height(), dst.channels()), src_map)?
            }
        };

        // The flip is applied in the source orientation, before rotating.
        match flip {
            Flip::None => {}
            Flip::Vertical => {
                src_view.invert_axis(Axis(0));
            }
            Flip::Horizontal => {
                src_view.invert_axis(Axis(1));
            }
        }

        // Rotations are expressed as an axis transpose plus one inversion
        // (90° cases) or a double inversion (180°).
        match rotation {
            Rotation::None => {}
            Rotation::Clockwise90 => {
                src_view.swap_axes(0, 1);
                src_view.invert_axis(Axis(1));
            }
            Rotation::Rotate180 => {
                src_view.invert_axis(Axis(0));
                src_view.invert_axis(Axis(1));
            }
            Rotation::CounterClockwise90 => {
                src_view.swap_axes(0, 1);
                src_view.invert_axis(Axis(0));
            }
        }

        // Single element-wise copy through the (possibly strided) view.
        dst_view.assign(&src_view);

        Ok(())
    }
134
135    fn convert_nv12_to_rgb(src: &TensorImage, dst: &mut TensorImage) -> Result<()> {
136        assert_eq!(src.fourcc(), NV12);
137        assert_eq!(dst.fourcc(), RGB);
138        let map = src.tensor.map()?;
139        let y_stride = src.width() as u32;
140        let uv_stride = src.width() as u32;
141        let slices = map.as_slice().split_at(y_stride as usize * src.height());
142
143        let src = yuv::YuvBiPlanarImage {
144            y_plane: slices.0,
145            y_stride,
146            uv_plane: slices.1,
147            uv_stride,
148            width: src.width() as u32,
149            height: src.height() as u32,
150        };
151
152        Ok(yuv::yuv_nv12_to_rgb(
153            &src,
154            dst.tensor.map()?.as_mut_slice(),
155            dst.row_stride() as u32,
156            yuv::YuvRange::Limited,
157            yuv::YuvStandardMatrix::Bt709,
158            yuv::YuvConversionMode::Balanced,
159        )?)
160    }
161
162    fn convert_nv12_to_rgba(src: &TensorImage, dst: &mut TensorImage) -> Result<()> {
163        assert_eq!(src.fourcc(), NV12);
164        assert_eq!(dst.fourcc(), RGBA);
165        let map = src.tensor.map()?;
166        let y_stride = src.width() as u32;
167        let uv_stride = src.width() as u32;
168        let slices = map.as_slice().split_at(y_stride as usize * src.height());
169
170        let src = yuv::YuvBiPlanarImage {
171            y_plane: slices.0,
172            y_stride,
173            uv_plane: slices.1,
174            uv_stride,
175            width: src.width() as u32,
176            height: src.height() as u32,
177        };
178
179        Ok(yuv::yuv_nv12_to_rgba(
180            &src,
181            dst.tensor.map()?.as_mut_slice(),
182            dst.row_stride() as u32,
183            yuv::YuvRange::Limited,
184            yuv::YuvStandardMatrix::Bt709,
185            yuv::YuvConversionMode::Balanced,
186        )?)
187    }
188
189    fn convert_nv12_to_grey(src: &TensorImage, dst: &mut TensorImage) -> Result<()> {
190        assert_eq!(src.fourcc(), NV12);
191        assert_eq!(dst.fourcc(), GREY);
192        let src_map = src.tensor.map()?;
193        let mut dst_map = dst.tensor.map()?;
194        let y_stride = src.width() as u32;
195        let y_slice = src_map
196            .as_slice()
197            .split_at(y_stride as usize * src.height())
198            .0;
199        let src_chunks = y_slice.as_chunks::<8>();
200        let dst_chunks = dst_map.as_chunks_mut::<8>();
201        for (s, d) in src_chunks.0.iter().zip(dst_chunks.0) {
202            s.iter().zip(d).for_each(|(s, d)| *d = limit_to_full(*s));
203        }
204
205        for (s, d) in src_chunks.1.iter().zip(dst_chunks.1) {
206            *d = limit_to_full(*s);
207        }
208
209        Ok(())
210    }
211
212    fn convert_yuyv_to_rgb(src: &TensorImage, dst: &mut TensorImage) -> Result<()> {
213        assert_eq!(src.fourcc(), YUYV);
214        assert_eq!(dst.fourcc(), RGB);
215        let src = yuv::YuvPackedImage::<u8> {
216            yuy: &src.tensor.map()?,
217            yuy_stride: src.row_stride() as u32, // we assume packed yuyv
218            width: src.width() as u32,
219            height: src.height() as u32,
220        };
221
222        Ok(yuv::yuyv422_to_rgb(
223            &src,
224            dst.tensor.map()?.as_mut_slice(),
225            dst.width() as u32 * 3,
226            yuv::YuvRange::Limited,
227            yuv::YuvStandardMatrix::Bt709,
228        )?)
229    }
230
231    fn convert_yuyv_to_rgba(src: &TensorImage, dst: &mut TensorImage) -> Result<()> {
232        assert_eq!(src.fourcc(), YUYV);
233        assert_eq!(dst.fourcc(), RGBA);
234        let src = yuv::YuvPackedImage::<u8> {
235            yuy: &src.tensor.map()?,
236            yuy_stride: src.row_stride() as u32, // we assume packed yuyv
237            width: src.width() as u32,
238            height: src.height() as u32,
239        };
240
241        Ok(yuv::yuyv422_to_rgba(
242            &src,
243            dst.tensor.map()?.as_mut_slice(),
244            dst.row_stride() as u32,
245            yuv::YuvRange::Limited,
246            yuv::YuvStandardMatrix::Bt709,
247        )?)
248    }
249
250    fn convert_yuyv_to_8bps(src: &TensorImage, dst: &mut TensorImage) -> Result<()> {
251        assert_eq!(src.fourcc(), YUYV);
252        assert_eq!(dst.fourcc(), PLANAR_RGB);
253        let mut tmp = TensorImage::new(src.width(), src.height(), RGB, None)?;
254        Self::convert_yuyv_to_rgb(src, &mut tmp)?;
255        Self::convert_rgb_to_8bps(&tmp, dst)
256    }
257
258    fn convert_yuyv_to_prgba(src: &TensorImage, dst: &mut TensorImage) -> Result<()> {
259        assert_eq!(src.fourcc(), YUYV);
260        assert_eq!(dst.fourcc(), PLANAR_RGBA);
261        let mut tmp = TensorImage::new(src.width(), src.height(), RGB, None)?;
262        Self::convert_yuyv_to_rgb(src, &mut tmp)?;
263        Self::convert_rgb_to_prgba(&tmp, dst)
264    }
265
266    fn convert_yuyv_to_grey(src: &TensorImage, dst: &mut TensorImage) -> Result<()> {
267        assert_eq!(src.fourcc(), YUYV);
268        assert_eq!(dst.fourcc(), GREY);
269        let src_map = src.tensor.map()?;
270        let mut dst_map = dst.tensor.map()?;
271        let src_chunks = src_map.as_chunks::<16>();
272        let dst_chunks = dst_map.as_chunks_mut::<8>();
273        for (s, d) in src_chunks.0.iter().zip(dst_chunks.0) {
274            s.iter()
275                .step_by(2)
276                .zip(d)
277                .for_each(|(s, d)| *d = limit_to_full(*s));
278        }
279
280        for (s, d) in src_chunks.1.iter().step_by(2).zip(dst_chunks.1) {
281            *d = limit_to_full(*s);
282        }
283
284        Ok(())
285    }
286
287    fn convert_yuyv_to_nv16(src: &TensorImage, dst: &mut TensorImage) -> Result<()> {
288        assert_eq!(src.fourcc(), YUYV);
289        assert_eq!(dst.fourcc(), NV16);
290        let src_map = src.tensor.map()?;
291        let mut dst_map = dst.tensor.map()?;
292
293        let src_chunks = src_map.as_chunks::<2>().0;
294        let (y_plane, uv_plane) = dst_map.split_at_mut(dst.row_stride() * dst.height());
295
296        for ((s, y), uv) in src_chunks.iter().zip(y_plane).zip(uv_plane) {
297            *y = s[0];
298            *uv = s[1];
299        }
300        Ok(())
301    }
302
303    fn convert_grey_to_rgb(src: &TensorImage, dst: &mut TensorImage) -> Result<()> {
304        assert_eq!(src.fourcc(), GREY);
305        assert_eq!(dst.fourcc(), RGB);
306        let src = yuv::YuvGrayImage::<u8> {
307            y_plane: &src.tensor.map()?,
308            y_stride: src.row_stride() as u32, // we assume packed Y
309            width: src.width() as u32,
310            height: src.height() as u32,
311        };
312        Ok(yuv::yuv400_to_rgb(
313            &src,
314            dst.tensor.map()?.as_mut_slice(),
315            dst.row_stride() as u32,
316            yuv::YuvRange::Full,
317            yuv::YuvStandardMatrix::Bt709,
318        )?)
319    }
320
321    fn convert_grey_to_rgba(src: &TensorImage, dst: &mut TensorImage) -> Result<()> {
322        assert_eq!(src.fourcc(), GREY);
323        assert_eq!(dst.fourcc(), RGBA);
324        let src = yuv::YuvGrayImage::<u8> {
325            y_plane: &src.tensor.map()?,
326            y_stride: src.row_stride() as u32,
327            width: src.width() as u32,
328            height: src.height() as u32,
329        };
330        Ok(yuv::yuv400_to_rgba(
331            &src,
332            dst.tensor.map()?.as_mut_slice(),
333            dst.row_stride() as u32,
334            yuv::YuvRange::Full,
335            yuv::YuvStandardMatrix::Bt709,
336        )?)
337    }
338
339    fn convert_grey_to_8bps(src: &TensorImage, dst: &mut TensorImage) -> Result<()> {
340        assert_eq!(src.fourcc(), GREY);
341        assert_eq!(dst.fourcc(), PLANAR_RGB);
342
343        let src = src.tensor().map()?;
344        let src = src.as_slice();
345
346        let mut dst_map = dst.tensor().map()?;
347        let dst_ = dst_map.as_mut_slice();
348
349        let (dst0, dst1) = dst_.split_at_mut(dst.width() * dst.height());
350        let (dst1, dst2) = dst1.split_at_mut(dst.width() * dst.height());
351
352        rayon::scope(|s| {
353            s.spawn(|_| dst0.copy_from_slice(src));
354            s.spawn(|_| dst1.copy_from_slice(src));
355            s.spawn(|_| dst2.copy_from_slice(src));
356        });
357        Ok(())
358    }
359
360    fn convert_grey_to_prgba(src: &TensorImage, dst: &mut TensorImage) -> Result<()> {
361        assert_eq!(src.fourcc(), GREY);
362        assert_eq!(dst.fourcc(), PLANAR_RGBA);
363
364        let src = src.tensor().map()?;
365        let src = src.as_slice();
366
367        let mut dst_map = dst.tensor().map()?;
368        let dst_ = dst_map.as_mut_slice();
369
370        let (dst0, dst1) = dst_.split_at_mut(dst.width() * dst.height());
371        let (dst1, dst2) = dst1.split_at_mut(dst.width() * dst.height());
372        let (dst2, dst3) = dst2.split_at_mut(dst.width() * dst.height());
373        rayon::scope(|s| {
374            s.spawn(|_| dst0.copy_from_slice(src));
375            s.spawn(|_| dst1.copy_from_slice(src));
376            s.spawn(|_| dst2.copy_from_slice(src));
377            s.spawn(|_| dst3.fill(255));
378        });
379        Ok(())
380    }
381
382    fn convert_grey_to_yuyv(src: &TensorImage, dst: &mut TensorImage) -> Result<()> {
383        assert_eq!(src.fourcc(), GREY);
384        assert_eq!(dst.fourcc(), YUYV);
385
386        let src = src.tensor().map()?;
387        let src = src.as_slice();
388
389        let mut dst = dst.tensor().map()?;
390        let dst = dst.as_mut_slice();
391        for (s, d) in src
392            .as_chunks::<2>()
393            .0
394            .iter()
395            .zip(dst.as_chunks_mut::<4>().0.iter_mut())
396        {
397            d[0] = full_to_limit(s[0]);
398            d[1] = 128;
399
400            d[2] = full_to_limit(s[1]);
401            d[3] = 128;
402        }
403        Ok(())
404    }
405
406    fn convert_grey_to_nv16(src: &TensorImage, dst: &mut TensorImage) -> Result<()> {
407        assert_eq!(src.fourcc(), GREY);
408        assert_eq!(dst.fourcc(), NV16);
409
410        let src = src.tensor().map()?;
411        let src = src.as_slice();
412
413        let mut dst = dst.tensor().map()?;
414        let dst = dst.as_mut_slice();
415
416        for (s, d) in src.iter().zip(dst[0..src.len()].iter_mut()) {
417            *d = full_to_limit(*s);
418        }
419        dst[src.len()..].fill(128);
420
421        Ok(())
422    }
423
424    fn convert_rgba_to_rgb(src: &TensorImage, dst: &mut TensorImage) -> Result<()> {
425        assert_eq!(src.fourcc(), RGBA);
426        assert_eq!(dst.fourcc(), RGB);
427
428        Ok(yuv::rgba_to_rgb(
429            src.tensor.map()?.as_slice(),
430            (src.width() * src.channels()) as u32,
431            dst.tensor.map()?.as_mut_slice(),
432            (dst.width() * dst.channels()) as u32,
433            src.width() as u32,
434            src.height() as u32,
435        )?)
436    }
437
438    fn convert_rgba_to_grey(src: &TensorImage, dst: &mut TensorImage) -> Result<()> {
439        assert_eq!(src.fourcc(), RGBA);
440        assert_eq!(dst.fourcc(), GREY);
441
442        let mut dst = yuv::YuvGrayImageMut::<u8> {
443            y_plane: yuv::BufferStoreMut::Borrowed(&mut dst.tensor.map()?),
444            y_stride: dst.row_stride() as u32,
445            width: dst.width() as u32,
446            height: dst.height() as u32,
447        };
448        Ok(yuv::rgba_to_yuv400(
449            &mut dst,
450            src.tensor.map()?.as_slice(),
451            src.row_stride() as u32,
452            yuv::YuvRange::Full,
453            yuv::YuvStandardMatrix::Bt709,
454        )?)
455    }
456
457    fn convert_rgba_to_8bps(src: &TensorImage, dst: &mut TensorImage) -> Result<()> {
458        assert_eq!(src.fourcc(), RGBA);
459        assert_eq!(dst.fourcc(), PLANAR_RGB);
460
461        let src = src.tensor().map()?;
462        let src = src.as_slice();
463        let src = src.as_chunks::<4>().0;
464
465        let mut dst_map = dst.tensor().map()?;
466        let dst_ = dst_map.as_mut_slice();
467
468        let (dst0, dst1) = dst_.split_at_mut(dst.width() * dst.height());
469        let (dst1, dst2) = dst1.split_at_mut(dst.width() * dst.height());
470
471        src.par_iter()
472            .zip_eq(dst0)
473            .zip_eq(dst1)
474            .zip_eq(dst2)
475            .for_each(|(((s, d0), d1), d2)| {
476                *d0 = s[0];
477                *d1 = s[1];
478                *d2 = s[2];
479            });
480        Ok(())
481    }
482
483    fn convert_rgba_to_prgba(src: &TensorImage, dst: &mut TensorImage) -> Result<()> {
484        assert_eq!(src.fourcc(), RGBA);
485        assert_eq!(dst.fourcc(), PLANAR_RGBA);
486
487        let src = src.tensor().map()?;
488        let src = src.as_slice();
489        let src = src.as_chunks::<4>().0;
490
491        let mut dst_map = dst.tensor().map()?;
492        let dst_ = dst_map.as_mut_slice();
493
494        let (dst0, dst1) = dst_.split_at_mut(dst.width() * dst.height());
495        let (dst1, dst2) = dst1.split_at_mut(dst.width() * dst.height());
496        let (dst2, dst3) = dst2.split_at_mut(dst.width() * dst.height());
497
498        src.par_iter()
499            .zip_eq(dst0)
500            .zip_eq(dst1)
501            .zip_eq(dst2)
502            .zip_eq(dst3)
503            .for_each(|((((s, d0), d1), d2), d3)| {
504                *d0 = s[0];
505                *d1 = s[1];
506                *d2 = s[2];
507                *d3 = s[3];
508            });
509        Ok(())
510    }
511
    /// Converts packed RGBA to packed YUYV (4:2:2) with a fixed-point Bt.709
    /// limited-range RGB→YUV matrix evaluated at compile time; alpha is
    /// ignored.
    fn convert_rgba_to_yuyv(src: &TensorImage, dst: &mut TensorImage) -> Result<()> {
        assert_eq!(src.fourcc(), RGBA);
        assert_eq!(dst.fourcc(), YUYV);

        let src = src.tensor().map()?;
        let src = src.as_slice();

        let mut dst = dst.tensor().map()?;
        let dst = dst.as_mut_slice();

        // compute quantized Bt.709 limited range RGB to YUV matrix
        // Bt.709 luma coefficients; KG is derived so the three sum to 1.
        const KR: f64 = 0.2126f64;
        const KB: f64 = 0.0722f64;
        const KG: f64 = 1.0 - KR - KB;
        // Fixed-point scale: matrix entries are premultiplied by 2^BIAS.
        const BIAS: i32 = 20;

        // Luma row, scaled to the limited-range luma excursion (219).
        const Y_R: i32 = (KR * (219 << BIAS) as f64 / 255.0).round() as i32;
        const Y_G: i32 = (KG * (219 << BIAS) as f64 / 255.0).round() as i32;
        const Y_B: i32 = (KB * (219 << BIAS) as f64 / 255.0).round() as i32;

        // Chroma rows, scaled to the chroma excursion (224).
        const U_R: i32 = (-KR / (KR + KG) / 2.0 * (224 << BIAS) as f64 / 255.0).round() as i32;
        const U_G: i32 = (-KG / (KR + KG) / 2.0 * (224 << BIAS) as f64 / 255.0).round() as i32;
        const U_B: i32 = (0.5_f64 * (224 << BIAS) as f64 / 255.0).ceil() as i32;

        const V_R: i32 = (0.5_f64 * (224 << BIAS) as f64 / 255.0).ceil() as i32;
        const V_G: i32 = (-KG / (KG + KB) / 2.0 * (224 << BIAS) as f64 / 255.0).round() as i32;
        const V_B: i32 = (-KB / (KG + KB) / 2.0 * (224 << BIAS) as f64 / 255.0).round() as i32;
        // Rounding terms: ROUND for a BIAS-bit shift, ROUND2 for BIAS+1 bits
        // (chroma averages two pixels, hence the extra shift bit).
        const ROUND: i32 = 1 << (BIAS - 1);
        const ROUND2: i32 = 1 << BIAS;
        // Converts two RGBA pixels into one YUYV macropixel: per-pixel luma
        // (+16 offset), chroma averaged over the pair (+128 offset, 4:2:2).
        let process_rgba_to_yuyv = |s: &[u8; 8], d: &mut [u8; 4]| {
            let [r0, g0, b0, _, r1, g1, b1, _] = *s;
            let r0 = r0 as i32;
            let g0 = g0 as i32;
            let b0 = b0 as i32;
            let r1 = r1 as i32;
            let g1 = g1 as i32;
            let b1 = b1 as i32;
            d[0] = ((Y_R * r0 + Y_G * g0 + Y_B * b0 + ROUND).shr(BIAS) + 16) as u8;
            d[1] = ((U_R * r0 + U_G * g0 + U_B * b0 + U_R * r1 + U_G * g1 + U_B * b1 + ROUND2)
                .shr(BIAS + 1)
                + 128) as u8;
            d[2] = ((Y_R * r1 + Y_G * g1 + Y_B * b1 + ROUND).shr(BIAS) + 16) as u8;
            d[3] = ((V_R * r0 + V_G * g0 + V_B * b0 + V_R * r1 + V_G * g1 + V_B * b1 + ROUND2)
                .shr(BIAS + 1)
                + 128) as u8;
        };

        // Work in blocks of 32 macropixels to help the auto-vectorizer, then
        // handle the remaining pixel pairs one at a time.
        let src = src.as_chunks::<{ 8 * 32 }>();
        let dst = dst.as_chunks_mut::<{ 4 * 32 }>();

        for (s, d) in src.0.iter().zip(dst.0.iter_mut()) {
            let s = s.as_chunks::<8>().0;
            let d = d.as_chunks_mut::<4>().0;
            for (s, d) in s.iter().zip(d.iter_mut()) {
                process_rgba_to_yuyv(s, d);
            }
        }

        let s = src.1.as_chunks::<8>().0;
        let d = dst.1.as_chunks_mut::<4>().0;
        for (s, d) in s.iter().zip(d.iter_mut()) {
            process_rgba_to_yuyv(s, d);
        }

        Ok(())
    }
578
579    fn convert_rgba_to_nv16(src: &TensorImage, dst: &mut TensorImage) -> Result<()> {
580        assert_eq!(src.fourcc(), RGBA);
581        assert_eq!(dst.fourcc(), NV16);
582
583        let mut dst_map = dst.tensor().map()?;
584
585        let (y_plane, uv_plane) = dst_map.split_at_mut(dst.width() * dst.height());
586        let mut bi_planar_image = yuv::YuvBiPlanarImageMut::<u8> {
587            y_plane: yuv::BufferStoreMut::Borrowed(y_plane),
588            y_stride: dst.width() as u32,
589            uv_plane: yuv::BufferStoreMut::Borrowed(uv_plane),
590            uv_stride: dst.width() as u32,
591            width: dst.width() as u32,
592            height: dst.height() as u32,
593        };
594
595        Ok(yuv::rgba_to_yuv_nv16(
596            &mut bi_planar_image,
597            src.tensor.map()?.as_slice(),
598            src.row_stride() as u32,
599            yuv::YuvRange::Limited,
600            yuv::YuvStandardMatrix::Bt709,
601            yuv::YuvConversionMode::Balanced,
602        )?)
603    }
604
605    fn convert_rgb_to_rgba(src: &TensorImage, dst: &mut TensorImage) -> Result<()> {
606        assert_eq!(src.fourcc(), RGB);
607        assert_eq!(dst.fourcc(), RGBA);
608
609        Ok(yuv::rgb_to_rgba(
610            src.tensor.map()?.as_slice(),
611            (src.width() * src.channels()) as u32,
612            dst.tensor.map()?.as_mut_slice(),
613            (dst.width() * dst.channels()) as u32,
614            src.width() as u32,
615            src.height() as u32,
616        )?)
617    }
618
619    fn convert_rgb_to_grey(src: &TensorImage, dst: &mut TensorImage) -> Result<()> {
620        assert_eq!(src.fourcc(), RGB);
621        assert_eq!(dst.fourcc(), GREY);
622
623        let mut dst = yuv::YuvGrayImageMut::<u8> {
624            y_plane: yuv::BufferStoreMut::Borrowed(&mut dst.tensor.map()?),
625            y_stride: dst.row_stride() as u32,
626            width: dst.width() as u32,
627            height: dst.height() as u32,
628        };
629        Ok(yuv::rgb_to_yuv400(
630            &mut dst,
631            src.tensor.map()?.as_slice(),
632            src.row_stride() as u32,
633            yuv::YuvRange::Full,
634            yuv::YuvStandardMatrix::Bt709,
635        )?)
636    }
637
638    fn convert_rgb_to_8bps(src: &TensorImage, dst: &mut TensorImage) -> Result<()> {
639        assert_eq!(src.fourcc(), RGB);
640        assert_eq!(dst.fourcc(), PLANAR_RGB);
641
642        let src = src.tensor().map()?;
643        let src = src.as_slice();
644        let src = src.as_chunks::<3>().0;
645
646        let mut dst_map = dst.tensor().map()?;
647        let dst_ = dst_map.as_mut_slice();
648
649        let (dst0, dst1) = dst_.split_at_mut(dst.width() * dst.height());
650        let (dst1, dst2) = dst1.split_at_mut(dst.width() * dst.height());
651
652        src.par_iter()
653            .zip_eq(dst0)
654            .zip_eq(dst1)
655            .zip_eq(dst2)
656            .for_each(|(((s, d0), d1), d2)| {
657                *d0 = s[0];
658                *d1 = s[1];
659                *d2 = s[2];
660            });
661        Ok(())
662    }
663
664    fn convert_rgb_to_prgba(src: &TensorImage, dst: &mut TensorImage) -> Result<()> {
665        assert_eq!(src.fourcc(), RGB);
666        assert_eq!(dst.fourcc(), PLANAR_RGBA);
667
668        let src = src.tensor().map()?;
669        let src = src.as_slice();
670        let src = src.as_chunks::<3>().0;
671
672        let mut dst_map = dst.tensor().map()?;
673        let dst_ = dst_map.as_mut_slice();
674
675        let (dst0, dst1) = dst_.split_at_mut(dst.width() * dst.height());
676        let (dst1, dst2) = dst1.split_at_mut(dst.width() * dst.height());
677        let (dst2, dst3) = dst2.split_at_mut(dst.width() * dst.height());
678
679        rayon::scope(|s| {
680            s.spawn(|_| {
681                src.par_iter()
682                    .zip_eq(dst0)
683                    .zip_eq(dst1)
684                    .zip_eq(dst2)
685                    .for_each(|(((s, d0), d1), d2)| {
686                        *d0 = s[0];
687                        *d1 = s[1];
688                        *d2 = s[2];
689                    })
690            });
691            s.spawn(|_| dst3.fill(255));
692        });
693        Ok(())
694    }
695
    /// Converts packed RGB to packed YUYV (4:2:2) with a fixed-point Bt.709
    /// limited-range RGB→YUV matrix evaluated at compile time.
    fn convert_rgb_to_yuyv(src: &TensorImage, dst: &mut TensorImage) -> Result<()> {
        assert_eq!(src.fourcc(), RGB);
        assert_eq!(dst.fourcc(), YUYV);

        let src = src.tensor().map()?;
        let src = src.as_slice();

        let mut dst = dst.tensor().map()?;
        let dst = dst.as_mut_slice();

        // compute quantized Bt.709 limited range RGB to YUV matrix
        // Fixed-point scale: matrix entries are premultiplied by 2^BIAS.
        const BIAS: i32 = 20;
        // Bt.709 luma coefficients; KG is derived so the three sum to 1.
        const KR: f64 = 0.2126f64;
        const KB: f64 = 0.0722f64;
        const KG: f64 = 1.0 - KR - KB;
        // Luma row, scaled to the limited-range luma excursion (219).
        const Y_R: i32 = (KR * (219 << BIAS) as f64 / 255.0).round() as i32;
        const Y_G: i32 = (KG * (219 << BIAS) as f64 / 255.0).round() as i32;
        const Y_B: i32 = (KB * (219 << BIAS) as f64 / 255.0).round() as i32;

        // Chroma rows, scaled to the chroma excursion (224).
        const U_R: i32 = (-KR / (KR + KG) / 2.0 * (224 << BIAS) as f64 / 255.0).round() as i32;
        const U_G: i32 = (-KG / (KR + KG) / 2.0 * (224 << BIAS) as f64 / 255.0).round() as i32;
        const U_B: i32 = (0.5_f64 * (224 << BIAS) as f64 / 255.0).ceil() as i32;

        const V_R: i32 = (0.5_f64 * (224 << BIAS) as f64 / 255.0).ceil() as i32;
        const V_G: i32 = (-KG / (KG + KB) / 2.0 * (224 << BIAS) as f64 / 255.0).round() as i32;
        const V_B: i32 = (-KB / (KG + KB) / 2.0 * (224 << BIAS) as f64 / 255.0).round() as i32;
        // Rounding terms: ROUND for a BIAS-bit shift, ROUND2 for BIAS+1 bits
        // (chroma averages two pixels, hence the extra shift bit).
        const ROUND: i32 = 1 << (BIAS - 1);
        const ROUND2: i32 = 1 << BIAS;
        // Converts two RGB pixels into one YUYV macropixel: per-pixel luma
        // (+16 offset), chroma averaged over the pair (+128 offset, 4:2:2).
        let process_rgb_to_yuyv = |s: &[u8; 6], d: &mut [u8; 4]| {
            let [r0, g0, b0, r1, g1, b1] = *s;
            let r0 = r0 as i32;
            let g0 = g0 as i32;
            let b0 = b0 as i32;
            let r1 = r1 as i32;
            let g1 = g1 as i32;
            let b1 = b1 as i32;
            d[0] = ((Y_R * r0 + Y_G * g0 + Y_B * b0 + ROUND).shr(BIAS) + 16) as u8;
            d[1] = ((U_R * r0 + U_G * g0 + U_B * b0 + U_R * r1 + U_G * g1 + U_B * b1 + ROUND2)
                .shr(BIAS + 1)
                + 128) as u8;
            d[2] = ((Y_R * r1 + Y_G * g1 + Y_B * b1 + ROUND).shr(BIAS) + 16) as u8;
            d[3] = ((V_R * r0 + V_G * g0 + V_B * b0 + V_R * r1 + V_G * g1 + V_B * b1 + ROUND2)
                .shr(BIAS + 1)
                + 128) as u8;
        };

        // Work in blocks of 32 macropixels to help the auto-vectorizer, then
        // handle the remaining pixel pairs one at a time.
        let src = src.as_chunks::<{ 6 * 32 }>();
        let dst = dst.as_chunks_mut::<{ 4 * 32 }>();
        for (s, d) in src.0.iter().zip(dst.0.iter_mut()) {
            let s = s.as_chunks::<6>().0;
            let d = d.as_chunks_mut::<4>().0;
            for (s, d) in s.iter().zip(d.iter_mut()) {
                process_rgb_to_yuyv(s, d);
            }
        }

        let s = src.1.as_chunks::<6>().0;
        let d = dst.1.as_chunks_mut::<4>().0;
        for (s, d) in s.iter().zip(d.iter_mut()) {
            process_rgb_to_yuyv(s, d);
        }

        Ok(())
    }
760
761    fn convert_rgb_to_nv16(src: &TensorImage, dst: &mut TensorImage) -> Result<()> {
762        assert_eq!(src.fourcc(), RGB);
763        assert_eq!(dst.fourcc(), NV16);
764
765        let mut dst_map = dst.tensor().map()?;
766
767        let (y_plane, uv_plane) = dst_map.split_at_mut(dst.width() * dst.height());
768        let mut bi_planar_image = yuv::YuvBiPlanarImageMut::<u8> {
769            y_plane: yuv::BufferStoreMut::Borrowed(y_plane),
770            y_stride: dst.width() as u32,
771            uv_plane: yuv::BufferStoreMut::Borrowed(uv_plane),
772            uv_stride: dst.width() as u32,
773            width: dst.width() as u32,
774            height: dst.height() as u32,
775        };
776
777        Ok(yuv::rgb_to_yuv_nv16(
778            &mut bi_planar_image,
779            src.tensor.map()?.as_slice(),
780            src.row_stride() as u32,
781            yuv::YuvRange::Limited,
782            yuv::YuvStandardMatrix::Bt709,
783            yuv::YuvConversionMode::Balanced,
784        )?)
785    }
786
787    fn copy_image(src: &TensorImage, dst: &mut TensorImage) -> Result<()> {
788        assert_eq!(src.fourcc(), dst.fourcc());
789        dst.tensor().map()?.copy_from_slice(&src.tensor().map()?);
790        Ok(())
791    }
792
793    fn convert_nv16_to_rgb(src: &TensorImage, dst: &mut TensorImage) -> Result<()> {
794        assert_eq!(src.fourcc(), NV16);
795        assert_eq!(dst.fourcc(), RGB);
796        let map = src.tensor.map()?;
797        let y_stride = src.width() as u32;
798        let uv_stride = src.width() as u32;
799        let slices = map.as_slice().split_at(y_stride as usize * src.height());
800
801        let src = yuv::YuvBiPlanarImage {
802            y_plane: slices.0,
803            y_stride,
804            uv_plane: slices.1,
805            uv_stride,
806            width: src.width() as u32,
807            height: src.height() as u32,
808        };
809
810        Ok(yuv::yuv_nv16_to_rgb(
811            &src,
812            dst.tensor.map()?.as_mut_slice(),
813            dst.row_stride() as u32,
814            yuv::YuvRange::Limited,
815            yuv::YuvStandardMatrix::Bt709,
816            yuv::YuvConversionMode::Balanced,
817        )?)
818    }
819
820    fn convert_nv16_to_rgba(src: &TensorImage, dst: &mut TensorImage) -> Result<()> {
821        assert_eq!(src.fourcc(), NV16);
822        assert_eq!(dst.fourcc(), RGBA);
823        let map = src.tensor.map()?;
824        let y_stride = src.width() as u32;
825        let uv_stride = src.width() as u32;
826        let slices = map.as_slice().split_at(y_stride as usize * src.height());
827
828        let src = yuv::YuvBiPlanarImage {
829            y_plane: slices.0,
830            y_stride,
831            uv_plane: slices.1,
832            uv_stride,
833            width: src.width() as u32,
834            height: src.height() as u32,
835        };
836
837        Ok(yuv::yuv_nv16_to_rgba(
838            &src,
839            dst.tensor.map()?.as_mut_slice(),
840            dst.row_stride() as u32,
841            yuv::YuvRange::Limited,
842            yuv::YuvStandardMatrix::Bt709,
843            yuv::YuvConversionMode::Balanced,
844        )?)
845    }
846
847    fn convert_8bps_to_rgb(src: &TensorImage, dst: &mut TensorImage) -> Result<()> {
848        assert_eq!(src.fourcc(), PLANAR_RGB);
849        assert_eq!(dst.fourcc(), RGB);
850
851        let src_map = src.tensor().map()?;
852        let src_ = src_map.as_slice();
853
854        let (src0, src1) = src_.split_at(src.width() * src.height());
855        let (src1, src2) = src1.split_at(src.width() * src.height());
856
857        let mut dst_map = dst.tensor().map()?;
858        let dst_ = dst_map.as_mut_slice();
859
860        src0.par_iter()
861            .zip_eq(src1)
862            .zip_eq(src2)
863            .zip_eq(dst_.as_chunks_mut::<3>().0.par_iter_mut())
864            .for_each(|(((s0, s1), s2), d)| {
865                d[0] = *s0;
866                d[1] = *s1;
867                d[2] = *s2;
868            });
869        Ok(())
870    }
871
872    fn convert_8bps_to_rgba(src: &TensorImage, dst: &mut TensorImage) -> Result<()> {
873        assert_eq!(src.fourcc(), PLANAR_RGB);
874        assert_eq!(dst.fourcc(), RGBA);
875
876        let src_map = src.tensor().map()?;
877        let src_ = src_map.as_slice();
878
879        let (src0, src1) = src_.split_at(src.width() * src.height());
880        let (src1, src2) = src1.split_at(src.width() * src.height());
881
882        let mut dst_map = dst.tensor().map()?;
883        let dst_ = dst_map.as_mut_slice();
884
885        src0.par_iter()
886            .zip_eq(src1)
887            .zip_eq(src2)
888            .zip_eq(dst_.as_chunks_mut::<4>().0.par_iter_mut())
889            .for_each(|(((s0, s1), s2), d)| {
890                d[0] = *s0;
891                d[1] = *s1;
892                d[2] = *s2;
893                d[3] = 255;
894            });
895        Ok(())
896    }
897
898    fn convert_prgba_to_rgb(src: &TensorImage, dst: &mut TensorImage) -> Result<()> {
899        assert_eq!(src.fourcc(), PLANAR_RGBA);
900        assert_eq!(dst.fourcc(), RGB);
901
902        let src_map = src.tensor().map()?;
903        let src_ = src_map.as_slice();
904
905        let (src0, src1) = src_.split_at(src.width() * src.height());
906        let (src1, src2) = src1.split_at(src.width() * src.height());
907        let (src2, _src3) = src2.split_at(src.width() * src.height());
908
909        let mut dst_map = dst.tensor().map()?;
910        let dst_ = dst_map.as_mut_slice();
911
912        src0.par_iter()
913            .zip_eq(src1)
914            .zip_eq(src2)
915            .zip_eq(dst_.as_chunks_mut::<3>().0.par_iter_mut())
916            .for_each(|(((s0, s1), s2), d)| {
917                d[0] = *s0;
918                d[1] = *s1;
919                d[2] = *s2;
920            });
921        Ok(())
922    }
923
924    fn convert_prgba_to_rgba(src: &TensorImage, dst: &mut TensorImage) -> Result<()> {
925        assert_eq!(src.fourcc(), PLANAR_RGBA);
926        assert_eq!(dst.fourcc(), RGBA);
927
928        let src_map = src.tensor().map()?;
929        let src_ = src_map.as_slice();
930
931        let (src0, src1) = src_.split_at(src.width() * src.height());
932        let (src1, src2) = src1.split_at(src.width() * src.height());
933        let (src2, src3) = src2.split_at(src.width() * src.height());
934
935        let mut dst_map = dst.tensor().map()?;
936        let dst_ = dst_map.as_mut_slice();
937
938        src0.par_iter()
939            .zip_eq(src1)
940            .zip_eq(src2)
941            .zip_eq(src3)
942            .zip_eq(dst_.as_chunks_mut::<4>().0.par_iter_mut())
943            .for_each(|((((s0, s1), s2), s3), d)| {
944                d[0] = *s0;
945                d[1] = *s1;
946                d[2] = *s2;
947                d[3] = *s3;
948            });
949        Ok(())
950    }
951
952    pub(crate) fn support_conversion(src: FourCharCode, dst: FourCharCode) -> bool {
953        matches!(
954            (src, dst),
955            (NV12, RGB)
956                | (NV12, RGBA)
957                | (NV12, GREY)
958                | (NV16, RGB)
959                | (NV16, RGBA)
960                | (YUYV, RGB)
961                | (YUYV, RGBA)
962                | (YUYV, GREY)
963                | (YUYV, YUYV)
964                | (YUYV, PLANAR_RGB)
965                | (YUYV, PLANAR_RGBA)
966                | (YUYV, NV16)
967                | (RGBA, RGB)
968                | (RGBA, RGBA)
969                | (RGBA, GREY)
970                | (RGBA, YUYV)
971                | (RGBA, PLANAR_RGB)
972                | (RGBA, PLANAR_RGBA)
973                | (RGBA, NV16)
974                | (RGB, RGB)
975                | (RGB, RGBA)
976                | (RGB, GREY)
977                | (RGB, YUYV)
978                | (RGB, PLANAR_RGB)
979                | (RGB, PLANAR_RGBA)
980                | (RGB, NV16)
981                | (GREY, RGB)
982                | (GREY, RGBA)
983                | (GREY, GREY)
984                | (GREY, YUYV)
985                | (GREY, PLANAR_RGB)
986                | (GREY, PLANAR_RGBA)
987                | (GREY, NV16)
988        )
989    }
990
    /// Converts `src` into the pixel format of `dst`, dispatching on the
    /// (source fourcc, destination fourcc) pair. Both images must have the
    /// same width and height (asserted). Returns [`Error::NotSupported`]
    /// for any pair without a handler.
    pub(crate) fn convert_format(src: &TensorImage, dst: &mut TensorImage) -> Result<()> {
        // shapes should be equal
        let _timer = FunctionTimer::new(format!(
            "ImageProcessor::convert_format {} to {}",
            src.fourcc().display(),
            dst.fourcc().display()
        ));
        assert_eq!(src.height(), dst.height());
        assert_eq!(src.width(), dst.width());

        // Identical source/destination formats fall through to copy_image.
        match (src.fourcc(), dst.fourcc()) {
            (NV12, RGB) => Self::convert_nv12_to_rgb(src, dst),
            (NV12, RGBA) => Self::convert_nv12_to_rgba(src, dst),
            (NV12, GREY) => Self::convert_nv12_to_grey(src, dst),
            (YUYV, RGB) => Self::convert_yuyv_to_rgb(src, dst),
            (YUYV, RGBA) => Self::convert_yuyv_to_rgba(src, dst),
            (YUYV, GREY) => Self::convert_yuyv_to_grey(src, dst),
            (YUYV, YUYV) => Self::copy_image(src, dst),
            (YUYV, PLANAR_RGB) => Self::convert_yuyv_to_8bps(src, dst),
            (YUYV, PLANAR_RGBA) => Self::convert_yuyv_to_prgba(src, dst),
            (YUYV, NV16) => Self::convert_yuyv_to_nv16(src, dst),
            (RGBA, RGB) => Self::convert_rgba_to_rgb(src, dst),
            (RGBA, RGBA) => Self::copy_image(src, dst),
            (RGBA, GREY) => Self::convert_rgba_to_grey(src, dst),
            (RGBA, YUYV) => Self::convert_rgba_to_yuyv(src, dst),
            (RGBA, PLANAR_RGB) => Self::convert_rgba_to_8bps(src, dst),
            (RGBA, PLANAR_RGBA) => Self::convert_rgba_to_prgba(src, dst),
            (RGBA, NV16) => Self::convert_rgba_to_nv16(src, dst),
            (RGB, RGB) => Self::copy_image(src, dst),
            (RGB, RGBA) => Self::convert_rgb_to_rgba(src, dst),
            (RGB, GREY) => Self::convert_rgb_to_grey(src, dst),
            (RGB, YUYV) => Self::convert_rgb_to_yuyv(src, dst),
            (RGB, PLANAR_RGB) => Self::convert_rgb_to_8bps(src, dst),
            (RGB, PLANAR_RGBA) => Self::convert_rgb_to_prgba(src, dst),
            (RGB, NV16) => Self::convert_rgb_to_nv16(src, dst),
            (GREY, RGB) => Self::convert_grey_to_rgb(src, dst),
            (GREY, RGBA) => Self::convert_grey_to_rgba(src, dst),
            (GREY, GREY) => Self::copy_image(src, dst),
            (GREY, YUYV) => Self::convert_grey_to_yuyv(src, dst),
            (GREY, PLANAR_RGB) => Self::convert_grey_to_8bps(src, dst),
            (GREY, PLANAR_RGBA) => Self::convert_grey_to_prgba(src, dst),
            (GREY, NV16) => Self::convert_grey_to_nv16(src, dst),

            // the following converts are added for use in testing
            // NOTE(review): NV16->RGB(A) is also advertised by
            // support_conversion, while the PLANAR_* sources are not —
            // confirm whether that asymmetry is intended.
            (NV16, RGB) => Self::convert_nv16_to_rgb(src, dst),
            (NV16, RGBA) => Self::convert_nv16_to_rgba(src, dst),
            (PLANAR_RGB, RGB) => Self::convert_8bps_to_rgb(src, dst),
            (PLANAR_RGB, RGBA) => Self::convert_8bps_to_rgba(src, dst),
            (PLANAR_RGBA, RGB) => Self::convert_prgba_to_rgb(src, dst),
            (PLANAR_RGBA, RGBA) => Self::convert_prgba_to_rgba(src, dst),
            (s, d) => Err(Error::NotSupported(format!(
                "Conversion from {} to {}",
                s.display(),
                d.display()
            ))),
        }
    }
1048
1049    /// Generic RGB to PLANAR_RGB conversion that works with any TensorImageDst.
1050    fn convert_rgb_to_planar_rgb_generic<D: TensorImageDst>(
1051        src: &TensorImage,
1052        dst: &mut D,
1053    ) -> Result<()> {
1054        assert_eq!(src.fourcc(), RGB);
1055        assert_eq!(dst.fourcc(), PLANAR_RGB);
1056
1057        let src = src.tensor().map()?;
1058        let src = src.as_slice();
1059        let src = src.as_chunks::<3>().0;
1060
1061        let mut dst_map = dst.tensor_mut().map()?;
1062        let dst_ = dst_map.as_mut_slice();
1063
1064        let (dst0, dst1) = dst_.split_at_mut(dst.width() * dst.height());
1065        let (dst1, dst2) = dst1.split_at_mut(dst.width() * dst.height());
1066
1067        src.par_iter()
1068            .zip_eq(dst0)
1069            .zip_eq(dst1)
1070            .zip_eq(dst2)
1071            .for_each(|(((s, d0), d1), d2)| {
1072                *d0 = s[0];
1073                *d1 = s[1];
1074                *d2 = s[2];
1075            });
1076        Ok(())
1077    }
1078
1079    /// Generic RGBA to PLANAR_RGB conversion that works with any
1080    /// TensorImageDst.
1081    fn convert_rgba_to_planar_rgb_generic<D: TensorImageDst>(
1082        src: &TensorImage,
1083        dst: &mut D,
1084    ) -> Result<()> {
1085        assert_eq!(src.fourcc(), RGBA);
1086        assert_eq!(dst.fourcc(), PLANAR_RGB);
1087
1088        let src = src.tensor().map()?;
1089        let src = src.as_slice();
1090        let src = src.as_chunks::<4>().0;
1091
1092        let mut dst_map = dst.tensor_mut().map()?;
1093        let dst_ = dst_map.as_mut_slice();
1094
1095        let (dst0, dst1) = dst_.split_at_mut(dst.width() * dst.height());
1096        let (dst1, dst2) = dst1.split_at_mut(dst.width() * dst.height());
1097
1098        src.par_iter()
1099            .zip_eq(dst0)
1100            .zip_eq(dst1)
1101            .zip_eq(dst2)
1102            .for_each(|(((s, d0), d1), d2)| {
1103                *d0 = s[0];
1104                *d1 = s[1];
1105                *d2 = s[2];
1106            });
1107        Ok(())
1108    }
1109
1110    /// Generic copy for same-format images that works with any TensorImageDst.
1111    fn copy_image_generic<D: TensorImageDst>(src: &TensorImage, dst: &mut D) -> Result<()> {
1112        assert_eq!(src.fourcc(), dst.fourcc());
1113        dst.tensor_mut()
1114            .map()?
1115            .copy_from_slice(&src.tensor().map()?);
1116        Ok(())
1117    }
1118
1119    /// Format conversion that writes to a generic TensorImageDst.
1120    /// Supports common zero-copy preprocessing cases.
1121    pub(crate) fn convert_format_generic<D: TensorImageDst>(
1122        src: &TensorImage,
1123        dst: &mut D,
1124    ) -> Result<()> {
1125        let _timer = FunctionTimer::new(format!(
1126            "ImageProcessor::convert_format_generic {} to {}",
1127            src.fourcc().display(),
1128            dst.fourcc().display()
1129        ));
1130        assert_eq!(src.height(), dst.height());
1131        assert_eq!(src.width(), dst.width());
1132
1133        match (src.fourcc(), dst.fourcc()) {
1134            (RGB, PLANAR_RGB) => Self::convert_rgb_to_planar_rgb_generic(src, dst),
1135            (RGBA, PLANAR_RGB) => Self::convert_rgba_to_planar_rgb_generic(src, dst),
1136            (f1, f2) if f1 == f2 => Self::copy_image_generic(src, dst),
1137            (s, d) => Err(Error::NotSupported(format!(
1138                "Generic conversion from {} to {} not supported",
1139                s.display(),
1140                d.display()
1141            ))),
1142        }
1143    }
1144
    /// Resizes `src` into `dst`, optionally cropping (source and/or
    /// destination rectangle), rotating, and flipping.
    ///
    /// The src and dest img should be in RGB/RGBA/grey format for correct
    /// output. If the format is not 1, 3, or 4 channels per pixel, an error
    /// will be returned. The src and dest img must have the same fourcc,
    /// otherwise the function will panic.
    fn resize_flip_rotate(
        &mut self,
        src: &TensorImage,
        dst: &mut TensorImage,
        rotation: Rotation,
        flip: Flip,
        crop: Crop,
    ) -> Result<()> {
        let _timer = FunctionTimer::new(format!(
            "ImageProcessor::resize_flip_rotate {}x{} to {}x{} {}",
            src.width(),
            src.height(),
            dst.width(),
            dst.height(),
            dst.fourcc().display()
        ));
        assert_eq!(src.fourcc(), dst.fourcc());

        // Map the channel count onto fast_image_resize's pixel layout.
        let src_type = match src.channels() {
            1 => fast_image_resize::PixelType::U8,
            3 => fast_image_resize::PixelType::U8x3,
            4 => fast_image_resize::PixelType::U8x4,
            _ => {
                return Err(Error::NotImplemented(
                    "Unsupported source image format".to_string(),
                ));
            }
        };

        let mut src_map = src.tensor().map()?;

        let mut dst_map = dst.tensor().map()?;

        // Restrict the resize to the source crop rectangle when one is given.
        let options = if let Some(crop) = crop.src_rect {
            self.options.crop(
                crop.left as f64,
                crop.top as f64,
                crop.width as f64,
                crop.height as f64,
            )
        } else {
            self.options
        };

        // Default destination rectangle covers the full output image.
        let mut dst_rect = crop.dst_rect.unwrap_or_else(|| Rect {
            left: 0,
            top: 0,
            width: dst.width(),
            height: dst.height(),
        });

        // adjust crop box for rotation/flip
        Self::adjust_dest_rect_for_rotate_flip(&mut dst_rect, dst, rotation, flip);

        // Resizing can be skipped only when dimensions match and any crop
        // rectangles cover the full source/destination.
        let needs_resize = src.width() != dst.width()
            || src.height() != dst.height()
            || crop.src_rect.is_some_and(|crop| {
                crop != Rect {
                    left: 0,
                    top: 0,
                    width: src.width(),
                    height: src.height(),
                }
            })
            || crop.dst_rect.is_some_and(|crop| {
                crop != Rect {
                    left: 0,
                    top: 0,
                    width: dst.width(),
                    height: dst.height(),
                }
            });

        if needs_resize {
            let src_view = fast_image_resize::images::Image::from_slice_u8(
                src.width() as u32,
                src.height() as u32,
                &mut src_map,
                src_type,
            )?;

            match (rotation, flip) {
                // Fast path: resize straight into the destination crop.
                (Rotation::None, Flip::None) => {
                    let mut dst_view = fast_image_resize::images::Image::from_slice_u8(
                        dst.width() as u32,
                        dst.height() as u32,
                        &mut dst_map,
                        src_type,
                    )?;

                    let mut dst_view = fast_image_resize::images::CroppedImageMut::new(
                        &mut dst_view,
                        dst_rect.left as u32,
                        dst_rect.top as u32,
                        dst_rect.width as u32,
                        dst_rect.height as u32,
                    )?;

                    self.resizer.resize(&src_view, &mut dst_view, &options)?;
                }
                // 90-degree rotations: resize into a temporary buffer with
                // the destination's axes swapped (height x width), then
                // rotate/flip that buffer into the destination.
                (Rotation::Clockwise90, _) | (Rotation::CounterClockwise90, _) => {
                    let mut tmp = vec![0; dst.row_stride() * dst.height()];
                    let mut tmp_view = fast_image_resize::images::Image::from_slice_u8(
                        dst.height() as u32,
                        dst.width() as u32,
                        &mut tmp,
                        src_type,
                    )?;

                    let mut tmp_view = fast_image_resize::images::CroppedImageMut::new(
                        &mut tmp_view,
                        dst_rect.left as u32,
                        dst_rect.top as u32,
                        dst_rect.width as u32,
                        dst_rect.height as u32,
                    )?;

                    self.resizer.resize(&src_view, &mut tmp_view, &options)?;
                    Self::flip_rotate_ndarray(&tmp, &mut dst_map, dst, rotation, flip)?;
                }
                // Flip-only and 180-degree cases: resize into a same-size
                // temporary buffer, then flip/rotate into the destination.
                // (Rotation::None, Flip::None) was already handled above, so
                // the None arm here only sees real flips.
                (Rotation::None, _) | (Rotation::Rotate180, _) => {
                    let mut tmp = vec![0; dst.row_stride() * dst.height()];
                    let mut tmp_view = fast_image_resize::images::Image::from_slice_u8(
                        dst.width() as u32,
                        dst.height() as u32,
                        &mut tmp,
                        src_type,
                    )?;

                    let mut tmp_view = fast_image_resize::images::CroppedImageMut::new(
                        &mut tmp_view,
                        dst_rect.left as u32,
                        dst_rect.top as u32,
                        dst_rect.width as u32,
                        dst_rect.height as u32,
                    )?;

                    self.resizer.resize(&src_view, &mut tmp_view, &options)?;
                    Self::flip_rotate_ndarray(&tmp, &mut dst_map, dst, rotation, flip)?;
                }
            }
        } else {
            // No scaling needed; rotate/flip directly from the source map.
            Self::flip_rotate_ndarray(&src_map, &mut dst_map, dst, rotation, flip)?;
        }
        Ok(())
    }
1295
    /// Rewrites a destination-space rectangle into the coordinate frame of
    /// the intermediate (pre-rotation/pre-flip) buffer, so that resizing into
    /// the adjusted rectangle and then rotating/flipping lands the content in
    /// the originally requested destination rectangle.
    ///
    /// `dst` supplies the final image dimensions. The rotation adjustment is
    /// applied first, then the flip; NOTE(review): this must mirror the
    /// composition order used by `flip_rotate_ndarray` (defined outside this
    /// view) — confirm against that helper.
    fn adjust_dest_rect_for_rotate_flip(
        crop: &mut Rect,
        dst: &TensorImage,
        rot: Rotation,
        flip: Flip,
    ) {
        match rot {
            Rotation::None => {}
            // 90 CW: the intermediate buffer has swapped axes. The new top is
            // the distance from the crop's right edge to the image's right
            // edge; width and height trade places.
            Rotation::Clockwise90 => {
                *crop = Rect {
                    left: crop.top,
                    top: dst.width() - crop.left - crop.width,
                    width: crop.height,
                    height: crop.width,
                }
            }
            // 180: mirror the rectangle on both axes; its size is unchanged.
            Rotation::Rotate180 => {
                *crop = Rect {
                    left: dst.width() - crop.left - crop.width,
                    top: dst.height() - crop.top - crop.height,
                    width: crop.width,
                    height: crop.height,
                }
            }
            // 90 CCW: inverse of the Clockwise90 mapping above.
            Rotation::CounterClockwise90 => {
                *crop = Rect {
                    left: dst.height() - crop.top - crop.height,
                    top: crop.left,
                    width: crop.height,
                    height: crop.width,
                }
            }
        }

        // A flip mirrors the rectangle along one axis of the final image.
        match flip {
            Flip::None => {}
            Flip::Vertical => crop.top = dst.height() - crop.top - crop.height,
            Flip::Horizontal => crop.left = dst.width() - crop.left - crop.width,
        }
    }
1336
1337    /// Fills the area outside a crop rectangle with the specified color.
1338    pub fn fill_image_outside_crop(dst: &mut TensorImage, rgba: [u8; 4], crop: Rect) -> Result<()> {
1339        let dst_fourcc = dst.fourcc();
1340        let mut dst_map = dst.tensor().map()?;
1341        let dst = (dst_map.as_mut_slice(), dst.width(), dst.height());
1342        match dst_fourcc {
1343            RGBA => Self::fill_image_outside_crop_(dst, rgba, crop),
1344            RGB => Self::fill_image_outside_crop_(dst, Self::rgba_to_rgb(rgba), crop),
1345            GREY => Self::fill_image_outside_crop_(dst, Self::rgba_to_grey(rgba), crop),
1346            YUYV => Self::fill_image_outside_crop_(
1347                (dst.0, dst.1 / 2, dst.2),
1348                Self::rgba_to_yuyv(rgba),
1349                Rect::new(crop.left / 2, crop.top, crop.width.div_ceil(2), crop.height),
1350            ),
1351            PLANAR_RGB => Self::fill_image_outside_crop_planar(dst, Self::rgba_to_rgb(rgba), crop),
1352            PLANAR_RGBA => Self::fill_image_outside_crop_planar(dst, rgba, crop),
1353            NV16 => {
1354                let yuyv = Self::rgba_to_yuyv(rgba);
1355                Self::fill_image_outside_crop_yuv_semiplanar(dst, yuyv[0], [yuyv[1], yuyv[3]], crop)
1356            }
1357            _ => Err(Error::Internal(format!(
1358                "Found unexpected destination {}",
1359                dst_fourcc.display()
1360            ))),
1361        }
1362    }
1363
1364    /// Generic fill for TensorImageDst types.
1365    pub(crate) fn fill_image_outside_crop_generic<D: TensorImageDst>(
1366        dst: &mut D,
1367        rgba: [u8; 4],
1368        crop: Rect,
1369    ) -> Result<()> {
1370        let dst_fourcc = dst.fourcc();
1371        let dst_width = dst.width();
1372        let dst_height = dst.height();
1373        let mut dst_map = dst.tensor_mut().map()?;
1374        let dst = (dst_map.as_mut_slice(), dst_width, dst_height);
1375        match dst_fourcc {
1376            RGBA => Self::fill_image_outside_crop_(dst, rgba, crop),
1377            RGB => Self::fill_image_outside_crop_(dst, Self::rgba_to_rgb(rgba), crop),
1378            GREY => Self::fill_image_outside_crop_(dst, Self::rgba_to_grey(rgba), crop),
1379            YUYV => Self::fill_image_outside_crop_(
1380                (dst.0, dst.1 / 2, dst.2),
1381                Self::rgba_to_yuyv(rgba),
1382                Rect::new(crop.left / 2, crop.top, crop.width.div_ceil(2), crop.height),
1383            ),
1384            PLANAR_RGB => Self::fill_image_outside_crop_planar(dst, Self::rgba_to_rgb(rgba), crop),
1385            PLANAR_RGBA => Self::fill_image_outside_crop_planar(dst, rgba, crop),
1386            NV16 => {
1387                let yuyv = Self::rgba_to_yuyv(rgba);
1388                Self::fill_image_outside_crop_yuv_semiplanar(dst, yuyv[0], [yuyv[1], yuyv[3]], crop)
1389            }
1390            _ => Err(Error::Internal(format!(
1391                "Found unexpected destination {}",
1392                dst_fourcc.display()
1393            ))),
1394        }
1395    }
1396
1397    fn fill_image_outside_crop_<const N: usize>(
1398        (dst, dst_width, _dst_height): (&mut [u8], usize, usize),
1399        pix: [u8; N],
1400        crop: Rect,
1401    ) -> Result<()> {
1402        use rayon::{
1403            iter::{IntoParallelRefMutIterator, ParallelIterator},
1404            prelude::ParallelSliceMut,
1405        };
1406
1407        let s = dst.as_chunks_mut::<N>().0;
1408        // calculate the top/bottom
1409        let top_offset = (0, (crop.top * dst_width + crop.left));
1410        let bottom_offset = (
1411            ((crop.top + crop.height) * dst_width + crop.left).min(s.len()),
1412            s.len(),
1413        );
1414
1415        s[top_offset.0..top_offset.1]
1416            .par_iter_mut()
1417            .for_each(|x| *x = pix);
1418
1419        s[bottom_offset.0..bottom_offset.1]
1420            .par_iter_mut()
1421            .for_each(|x| *x = pix);
1422
1423        if dst_width == crop.width {
1424            return Ok(());
1425        }
1426
1427        // the middle part has a stride as well
1428        let middle_stride = dst_width - crop.width;
1429        let middle_offset = (
1430            (crop.top * dst_width + crop.left + crop.width),
1431            ((crop.top + crop.height) * dst_width + crop.left + crop.width).min(s.len()),
1432        );
1433
1434        s[middle_offset.0..middle_offset.1]
1435            .par_chunks_exact_mut(dst_width)
1436            .for_each(|row| {
1437                for p in &mut row[0..middle_stride] {
1438                    *p = pix;
1439                }
1440            });
1441
1442        Ok(())
1443    }
1444
1445    fn fill_image_outside_crop_planar<const N: usize>(
1446        (dst, dst_width, dst_height): (&mut [u8], usize, usize),
1447        pix: [u8; N],
1448        crop: Rect,
1449    ) -> Result<()> {
1450        use rayon::{
1451            iter::{IntoParallelRefMutIterator, ParallelIterator},
1452            prelude::ParallelSliceMut,
1453        };
1454
1455        // map.as_mut_slice().splitn_mut(n, pred)
1456        let s_rem = dst;
1457
1458        s_rem
1459            .par_chunks_exact_mut(dst_height * dst_width)
1460            .zip(pix)
1461            .for_each(|(s, p)| {
1462                let top_offset = (0, (crop.top * dst_width + crop.left));
1463                let bottom_offset = (
1464                    ((crop.top + crop.height) * dst_width + crop.left).min(s.len()),
1465                    s.len(),
1466                );
1467
1468                s[top_offset.0..top_offset.1]
1469                    .par_iter_mut()
1470                    .for_each(|x| *x = p);
1471
1472                s[bottom_offset.0..bottom_offset.1]
1473                    .par_iter_mut()
1474                    .for_each(|x| *x = p);
1475
1476                if dst_width == crop.width {
1477                    return;
1478                }
1479
1480                // the middle part has a stride as well
1481                let middle_stride = dst_width - crop.width;
1482                let middle_offset = (
1483                    (crop.top * dst_width + crop.left + crop.width),
1484                    ((crop.top + crop.height) * dst_width + crop.left + crop.width).min(s.len()),
1485                );
1486
1487                s[middle_offset.0..middle_offset.1]
1488                    .par_chunks_exact_mut(dst_width)
1489                    .for_each(|row| {
1490                        for x in &mut row[0..middle_stride] {
1491                            *x = p;
1492                        }
1493                    });
1494            });
1495        Ok(())
1496    }
1497
1498    fn fill_image_outside_crop_yuv_semiplanar(
1499        (dst, dst_width, dst_height): (&mut [u8], usize, usize),
1500        y: u8,
1501        uv: [u8; 2],
1502        mut crop: Rect,
1503    ) -> Result<()> {
1504        let (y_plane, uv_plane) = dst.split_at_mut(dst_width * dst_height);
1505        Self::fill_image_outside_crop_::<1>((y_plane, dst_width, dst_height), [y], crop)?;
1506        crop.left /= 2;
1507        crop.width /= 2;
1508        Self::fill_image_outside_crop_::<2>((uv_plane, dst_width / 2, dst_height), uv, crop)?;
1509        Ok(())
1510    }
1511
1512    fn rgba_to_rgb(rgba: [u8; 4]) -> [u8; 3] {
1513        let [r, g, b, _] = rgba;
1514        [r, g, b]
1515    }
1516
1517    fn rgba_to_grey(rgba: [u8; 4]) -> [u8; 1] {
1518        const BIAS: i32 = 20;
1519        const KR: f64 = 0.2126f64;
1520        const KB: f64 = 0.0722f64;
1521        const KG: f64 = 1.0 - KR - KB;
1522        const Y_R: i32 = (KR * (255 << BIAS) as f64 / 255.0).round() as i32;
1523        const Y_G: i32 = (KG * (255 << BIAS) as f64 / 255.0).round() as i32;
1524        const Y_B: i32 = (KB * (255 << BIAS) as f64 / 255.0).round() as i32;
1525
1526        const ROUND: i32 = 1 << (BIAS - 1);
1527
1528        let [r, g, b, _] = rgba;
1529        let y = ((Y_R * r as i32 + Y_G * g as i32 + Y_B * b as i32 + ROUND) >> BIAS) as u8;
1530        [y]
1531    }
1532
1533    fn rgba_to_yuyv(rgba: [u8; 4]) -> [u8; 4] {
1534        const KR: f64 = 0.2126f64;
1535        const KB: f64 = 0.0722f64;
1536        const KG: f64 = 1.0 - KR - KB;
1537        const BIAS: i32 = 20;
1538
1539        const Y_R: i32 = (KR * (219 << BIAS) as f64 / 255.0).round() as i32;
1540        const Y_G: i32 = (KG * (219 << BIAS) as f64 / 255.0).round() as i32;
1541        const Y_B: i32 = (KB * (219 << BIAS) as f64 / 255.0).round() as i32;
1542
1543        const U_R: i32 = (-KR / (KR + KG) / 2.0 * (224 << BIAS) as f64 / 255.0).round() as i32;
1544        const U_G: i32 = (-KG / (KR + KG) / 2.0 * (224 << BIAS) as f64 / 255.0).round() as i32;
1545        const U_B: i32 = (0.5_f64 * (224 << BIAS) as f64 / 255.0).ceil() as i32;
1546
1547        const V_R: i32 = (0.5_f64 * (224 << BIAS) as f64 / 255.0).ceil() as i32;
1548        const V_G: i32 = (-KG / (KG + KB) / 2.0 * (224 << BIAS) as f64 / 255.0).round() as i32;
1549        const V_B: i32 = (-KB / (KG + KB) / 2.0 * (224 << BIAS) as f64 / 255.0).round() as i32;
1550        const ROUND: i32 = 1 << (BIAS - 1);
1551
1552        let [r, g, b, _] = rgba;
1553        let r = r as i32;
1554        let g = g as i32;
1555        let b = b as i32;
1556        let y = (((Y_R * r + Y_G * g + Y_B * b + ROUND) >> BIAS) + 16) as u8;
1557        let u = (((U_R * r + U_G * g + U_B * b + ROUND) >> BIAS) + 128) as u8;
1558        let v = (((V_R * r + V_G * g + V_B * b + ROUND) >> BIAS) + 128) as u8;
1559
1560        [y, u, y, v]
1561    }
1562
    /// Alpha-blends a ModelPack segmentation map into the region of
    /// `dst_slice` described by the segmentation's normalized bounding box.
    ///
    /// For each destination pixel the class map is sampled with rounded
    /// nearest-neighbor interpolation after a per-cell argmax over the class
    /// axis; pixels whose winning label is the last class are left untouched
    /// (presumably background — confirm with the decoder). The palette color
    /// for the label is blended into the first three channels using its
    /// alpha. `dst_slice` is assumed to be `dst`'s mapped packed pixel data
    /// with at least 3 channels — TODO confirm at the call site.
    #[cfg(feature = "decoder")]
    fn render_modelpack_segmentation(
        &mut self,
        dst: &TensorImage,
        dst_slice: &mut [u8],
        segmentation: &Segmentation,
    ) -> Result<()> {
        use ndarray_stats::QuantileExt;

        let seg = &segmentation.segmentation;
        let [seg_height, seg_width, seg_classes] = *seg.shape() else {
            unreachable!("Array3 did not have [usize; 3] as shape");
        };
        // Destination rectangle from the normalized [0, 1] bounding box.
        let start_y = (dst.height() as f32 * segmentation.ymin).round();
        let end_y = (dst.height() as f32 * segmentation.ymax).round();
        let start_x = (dst.width() as f32 * segmentation.xmin).round();
        let end_x = (dst.width() as f32 * segmentation.xmax).round();

        // Align-corners scale from destination pixels to map cells.
        // NOTE(review): a 1-pixel-wide/tall box makes the divisor zero.
        let scale_x = (seg_width as f32 - 1.0) / ((end_x - start_x) - 1.0);
        let scale_y = (seg_height as f32 - 1.0) / ((end_y - start_y) - 1.0);

        // Clamp the iteration bounds to the destination image.
        let start_x_u = (start_x as usize).min(dst.width());
        let start_y_u = (start_y as usize).min(dst.height());
        let end_x_u = (end_x as usize).min(dst.width());
        let end_y_u = (end_y as usize).min(dst.height());

        // Collapse the class axis to the winning class index per map cell.
        let argmax = seg.map_axis(Axis(2), |r| r.argmax().unwrap());
        let get_value_at_nearest = |x: f32, y: f32| -> usize {
            let x = x.round() as usize;
            let y = y.round() as usize;
            argmax
                .get([y.min(seg_height - 1), x.min(seg_width - 1)])
                .copied()
                .unwrap_or(0)
        };

        for y in start_y_u..end_y_u {
            for x in start_x_u..end_x_u {
                let seg_x = (x as f32 - start_x) * scale_x;
                let seg_y = (y as f32 - start_y) * scale_y;
                let label = get_value_at_nearest(seg_x, seg_y);

                // The last class index is never drawn.
                if label == seg_classes - 1 {
                    continue;
                }

                let color = self.colors[label % self.colors.len()];

                let alpha = color[3] as u16;

                // Blend over the existing pixel: out = (c*a + d*(255-a)) / 255.
                let dst_index = (y * dst.row_stride()) + (x * dst.channels());
                for c in 0..3 {
                    dst_slice[dst_index + c] = ((color[c] as u16 * alpha
                        + dst_slice[dst_index + c] as u16 * (255 - alpha))
                        / 255) as u8;
                }
            }
        }

        Ok(())
    }
1624
    /// Alpha-blends a single-class YOLO segmentation mask into the region of
    /// `dst_slice` described by the mask's normalized bounding box, using the
    /// palette color for `class`.
    ///
    /// The mask is sampled with a truncating nearest cast (unlike the
    /// rounding used by the ModelPack renderer) and values below 127 are
    /// treated as not-masked. Only the first three channels of each pixel are
    /// written; `dst_slice` is assumed to be `dst`'s mapped packed pixel data
    /// with at least 3 channels — TODO confirm at the call site.
    #[cfg(feature = "decoder")]
    fn render_yolo_segmentation(
        &mut self,
        dst: &TensorImage,
        dst_slice: &mut [u8],
        segmentation: &Segmentation,
        class: usize,
    ) -> Result<()> {
        let seg = &segmentation.segmentation;
        let [seg_height, seg_width, classes] = *seg.shape() else {
            unreachable!("Array3 did not have [usize;3] as shape");
        };
        // YOLO masks carry a single channel.
        debug_assert_eq!(classes, 1);

        // Destination rectangle from the normalized [0, 1] bounding box.
        let start_y = (dst.height() as f32 * segmentation.ymin).round();
        let end_y = (dst.height() as f32 * segmentation.ymax).round();
        let start_x = (dst.width() as f32 * segmentation.xmin).round();
        let end_x = (dst.width() as f32 * segmentation.xmax).round();

        // Align-corners scale from destination pixels to mask cells.
        // NOTE(review): a 1-pixel-wide/tall box makes the divisor zero.
        let scale_x = (seg_width as f32 - 1.0) / ((end_x - start_x) - 1.0);
        let scale_y = (seg_height as f32 - 1.0) / ((end_y - start_y) - 1.0);

        // Clamp the iteration bounds to the destination image.
        let start_x_u = (start_x as usize).min(dst.width());
        let start_y_u = (start_y as usize).min(dst.height());
        let end_x_u = (end_x as usize).min(dst.width());
        let end_y_u = (end_y as usize).min(dst.height());

        for y in start_y_u..end_y_u {
            for x in start_x_u..end_x_u {
                let seg_x = ((x as f32 - start_x) * scale_x) as usize;
                let seg_y = ((y as f32 - start_y) * scale_y) as usize;
                // Out-of-range samples read as 0 and are skipped below.
                let val = *seg.get([seg_y, seg_x, 0]).unwrap_or(&0);

                // Threshold the mask at the midpoint.
                if val < 127 {
                    continue;
                }

                let color = self.colors[class % self.colors.len()];

                let alpha = color[3] as u16;

                // Blend over the existing pixel: out = (c*a + d*(255-a)) / 255.
                let dst_index = (y * dst.row_stride()) + (x * dst.channels());
                for c in 0..3 {
                    dst_slice[dst_index + c] = ((color[c] as u16 * alpha
                        + dst_slice[dst_index + c] as u16 * (255 - alpha))
                        / 255) as u8;
                }
            }
        }

        Ok(())
    }
1677
1678    #[cfg(feature = "decoder")]
1679    fn render_box(
1680        &mut self,
1681        dst: &TensorImage,
1682        dst_slice: &mut [u8],
1683        detect: &[DetectBox],
1684    ) -> Result<()> {
1685        const LINE_THICKNESS: usize = 3;
1686        for d in detect {
1687            use edgefirst_decoder::BoundingBox;
1688
1689            let label = d.label;
1690            let [r, g, b, _] = self.colors[label % self.colors.len()];
1691            let bbox = d.bbox.to_canonical();
1692            let bbox = BoundingBox {
1693                xmin: bbox.xmin.clamp(0.0, 1.0),
1694                ymin: bbox.ymin.clamp(0.0, 1.0),
1695                xmax: bbox.xmax.clamp(0.0, 1.0),
1696                ymax: bbox.ymax.clamp(0.0, 1.0),
1697            };
1698            let inner = [
1699                ((dst.width() - 1) as f32 * bbox.xmin - 0.5).round() as usize,
1700                ((dst.height() - 1) as f32 * bbox.ymin - 0.5).round() as usize,
1701                ((dst.width() - 1) as f32 * bbox.xmax + 0.5).round() as usize,
1702                ((dst.height() - 1) as f32 * bbox.ymax + 0.5).round() as usize,
1703            ];
1704
1705            let outer = [
1706                inner[0].saturating_sub(LINE_THICKNESS),
1707                inner[1].saturating_sub(LINE_THICKNESS),
1708                (inner[2] + LINE_THICKNESS).min(dst.width()),
1709                (inner[3] + LINE_THICKNESS).min(dst.height()),
1710            ];
1711
1712            // top line
1713            for y in outer[1] + 1..=inner[1] {
1714                for x in outer[0] + 1..outer[2] {
1715                    let index = (y * dst.row_stride()) + (x * dst.channels());
1716                    dst_slice[index..(index + 3)].copy_from_slice(&[r, g, b]);
1717                }
1718            }
1719
1720            // left and right lines
1721            for y in inner[1]..inner[3] {
1722                for x in outer[0] + 1..=inner[0] {
1723                    let index = (y * dst.row_stride()) + (x * dst.channels());
1724                    dst_slice[index..(index + 3)].copy_from_slice(&[r, g, b]);
1725                }
1726
1727                for x in inner[2]..outer[2] {
1728                    let index = (y * dst.row_stride()) + (x * dst.channels());
1729                    dst_slice[index..(index + 3)].copy_from_slice(&[r, g, b]);
1730                }
1731            }
1732
1733            // bottom line
1734            for y in inner[3]..outer[3] {
1735                for x in outer[0] + 1..outer[2] {
1736                    let index = (y * dst.row_stride()) + (x * dst.channels());
1737                    dst_slice[index..(index + 3)].copy_from_slice(&[r, g, b]);
1738                }
1739            }
1740        }
1741        Ok(())
1742    }
1743}
1744
1745impl ImageProcessorTrait for CPUProcessor {
1746    fn convert(
1747        &mut self,
1748        src: &TensorImage,
1749        dst: &mut TensorImage,
1750        rotation: Rotation,
1751        flip: Flip,
1752        crop: Crop,
1753    ) -> Result<()> {
1754        crop.check_crop(src, dst)?;
1755        // supported destinations and srcs:
1756        let intermediate = match (src.fourcc(), dst.fourcc()) {
1757            (NV12, RGB) => RGB,
1758            (NV12, RGBA) => RGBA,
1759            (NV12, GREY) => GREY,
1760            (NV12, YUYV) => RGBA, // RGBA intermediary for YUYV dest resize/convert/rotation/flip
1761            (NV12, NV16) => RGBA, // RGBA intermediary for YUYV dest resize/convert/rotation/flip
1762            (NV12, PLANAR_RGB) => RGB,
1763            (NV12, PLANAR_RGBA) => RGBA,
1764            (YUYV, RGB) => RGB,
1765            (YUYV, RGBA) => RGBA,
1766            (YUYV, GREY) => GREY,
1767            (YUYV, YUYV) => RGBA, // RGBA intermediary for YUYV dest resize/convert/rotation/flip
1768            (YUYV, PLANAR_RGB) => RGB,
1769            (YUYV, PLANAR_RGBA) => RGBA,
1770            (YUYV, NV16) => RGBA,
1771            (RGBA, RGB) => RGBA,
1772            (RGBA, RGBA) => RGBA,
1773            (RGBA, GREY) => GREY,
1774            (RGBA, YUYV) => RGBA, // RGBA intermediary for YUYV dest resize/convert/rotation/flip
1775            (RGBA, PLANAR_RGB) => RGBA,
1776            (RGBA, PLANAR_RGBA) => RGBA,
1777            (RGBA, NV16) => RGBA,
1778            (RGB, RGB) => RGB,
1779            (RGB, RGBA) => RGB,
1780            (RGB, GREY) => GREY,
1781            (RGB, YUYV) => RGB, // RGB intermediary for YUYV dest resize/convert/rotation/flip
1782            (RGB, PLANAR_RGB) => RGB,
1783            (RGB, PLANAR_RGBA) => RGB,
1784            (RGB, NV16) => RGB,
1785            (GREY, RGB) => RGB,
1786            (GREY, RGBA) => RGBA,
1787            (GREY, GREY) => GREY,
1788            (GREY, YUYV) => GREY,
1789            (GREY, PLANAR_RGB) => GREY,
1790            (GREY, PLANAR_RGBA) => GREY,
1791            (GREY, NV16) => GREY,
1792            (s, d) => {
1793                return Err(Error::NotSupported(format!(
1794                    "Conversion from {} to {}",
1795                    s.display(),
1796                    d.display()
1797                )));
1798            }
1799        };
1800
1801        // let crop = crop.src_rect;
1802
1803        let need_resize_flip_rotation = rotation != Rotation::None
1804            || flip != Flip::None
1805            || src.width() != dst.width()
1806            || src.height() != dst.height()
1807            || crop.src_rect.is_some_and(|crop| {
1808                crop != Rect {
1809                    left: 0,
1810                    top: 0,
1811                    width: src.width(),
1812                    height: src.height(),
1813                }
1814            })
1815            || crop.dst_rect.is_some_and(|crop| {
1816                crop != Rect {
1817                    left: 0,
1818                    top: 0,
1819                    width: dst.width(),
1820                    height: dst.height(),
1821                }
1822            });
1823
1824        // check if a direct conversion can be done
1825        if !need_resize_flip_rotation && Self::support_conversion(src.fourcc(), dst.fourcc()) {
1826            return Self::convert_format(src, dst);
1827        };
1828
1829        // any extra checks
1830        if dst.fourcc() == YUYV && !dst.width().is_multiple_of(2) {
1831            return Err(Error::NotSupported(format!(
1832                "{} destination must have width divisible by 2",
1833                dst.fourcc().display(),
1834            )));
1835        }
1836
1837        // create tmp buffer
1838        let mut tmp_buffer;
1839        let tmp;
1840        if intermediate != src.fourcc() {
1841            tmp_buffer = TensorImage::new(
1842                src.width(),
1843                src.height(),
1844                intermediate,
1845                Some(edgefirst_tensor::TensorMemory::Mem),
1846            )?;
1847
1848            Self::convert_format(src, &mut tmp_buffer)?;
1849            tmp = &tmp_buffer;
1850        } else {
1851            tmp = src;
1852        }
1853
1854        // format must be RGB/RGBA/GREY
1855        matches!(tmp.fourcc(), RGB | RGBA | GREY);
1856        if tmp.fourcc() == dst.fourcc() {
1857            self.resize_flip_rotate(tmp, dst, rotation, flip, crop)?;
1858        } else if !need_resize_flip_rotation {
1859            Self::convert_format(tmp, dst)?;
1860        } else {
1861            let mut tmp2 = TensorImage::new(
1862                dst.width(),
1863                dst.height(),
1864                tmp.fourcc(),
1865                Some(edgefirst_tensor::TensorMemory::Mem),
1866            )?;
1867            if crop.dst_rect.is_some_and(|crop| {
1868                crop != Rect {
1869                    left: 0,
1870                    top: 0,
1871                    width: dst.width(),
1872                    height: dst.height(),
1873                }
1874            }) && crop.dst_color.is_none()
1875            {
1876                // convert the dst into tmp2 when there is a dst crop
1877                // TODO: this could be optimized by changing convert_format to take a
1878                // destination crop?
1879
1880                Self::convert_format(dst, &mut tmp2)?;
1881            }
1882            self.resize_flip_rotate(tmp, &mut tmp2, rotation, flip, crop)?;
1883            Self::convert_format(&tmp2, dst)?;
1884        }
1885        if let (Some(dst_rect), Some(dst_color)) = (crop.dst_rect, crop.dst_color) {
1886            let full_rect = Rect {
1887                left: 0,
1888                top: 0,
1889                width: dst.width(),
1890                height: dst.height(),
1891            };
1892            if dst_rect != full_rect {
1893                Self::fill_image_outside_crop(dst, dst_color, dst_rect)?;
1894            }
1895        }
1896
1897        Ok(())
1898    }
1899
1900    fn convert_ref(
1901        &mut self,
1902        src: &TensorImage,
1903        dst: &mut TensorImageRef<'_>,
1904        rotation: Rotation,
1905        flip: Flip,
1906        crop: Crop,
1907    ) -> Result<()> {
1908        crop.check_crop_ref(src, dst)?;
1909
1910        // Determine intermediate format needed for conversion
1911        let intermediate = match (src.fourcc(), dst.fourcc()) {
1912            (NV12, RGB) => RGB,
1913            (NV12, RGBA) => RGBA,
1914            (NV12, GREY) => GREY,
1915            (NV12, PLANAR_RGB) => RGB,
1916            (NV12, PLANAR_RGBA) => RGBA,
1917            (YUYV, RGB) => RGB,
1918            (YUYV, RGBA) => RGBA,
1919            (YUYV, GREY) => GREY,
1920            (YUYV, PLANAR_RGB) => RGB,
1921            (YUYV, PLANAR_RGBA) => RGBA,
1922            (RGBA, RGB) => RGBA,
1923            (RGBA, RGBA) => RGBA,
1924            (RGBA, GREY) => GREY,
1925            (RGBA, PLANAR_RGB) => RGBA,
1926            (RGBA, PLANAR_RGBA) => RGBA,
1927            (RGB, RGB) => RGB,
1928            (RGB, RGBA) => RGB,
1929            (RGB, GREY) => GREY,
1930            (RGB, PLANAR_RGB) => RGB,
1931            (RGB, PLANAR_RGBA) => RGB,
1932            (GREY, RGB) => RGB,
1933            (GREY, RGBA) => RGBA,
1934            (GREY, GREY) => GREY,
1935            (GREY, PLANAR_RGB) => GREY,
1936            (GREY, PLANAR_RGBA) => GREY,
1937            (s, d) => {
1938                return Err(Error::NotSupported(format!(
1939                    "Conversion from {} to {}",
1940                    s.display(),
1941                    d.display()
1942                )));
1943            }
1944        };
1945
1946        let need_resize_flip_rotation = rotation != Rotation::None
1947            || flip != Flip::None
1948            || src.width() != dst.width()
1949            || src.height() != dst.height()
1950            || crop.src_rect.is_some_and(|crop| {
1951                crop != Rect {
1952                    left: 0,
1953                    top: 0,
1954                    width: src.width(),
1955                    height: src.height(),
1956                }
1957            })
1958            || crop.dst_rect.is_some_and(|crop| {
1959                crop != Rect {
1960                    left: 0,
1961                    top: 0,
1962                    width: dst.width(),
1963                    height: dst.height(),
1964                }
1965            });
1966
1967        // Simple case: no resize/flip/rotation needed
1968        if !need_resize_flip_rotation {
1969            // Try direct generic conversion (zero-copy path)
1970            if let Ok(()) = Self::convert_format_generic(src, dst) {
1971                return Ok(());
1972            }
1973        }
1974
1975        // Complex case: need intermediate buffers
1976        // First, convert source to intermediate format if needed
1977        let mut tmp_buffer;
1978        let tmp: &TensorImage;
1979        if intermediate != src.fourcc() {
1980            tmp_buffer = TensorImage::new(
1981                src.width(),
1982                src.height(),
1983                intermediate,
1984                Some(edgefirst_tensor::TensorMemory::Mem),
1985            )?;
1986            Self::convert_format(src, &mut tmp_buffer)?;
1987            tmp = &tmp_buffer;
1988        } else {
1989            tmp = src;
1990        }
1991
1992        // Process resize/flip/rotation if needed
1993        if need_resize_flip_rotation {
1994            // Create intermediate buffer for resize output
1995            let mut tmp2 = TensorImage::new(
1996                dst.width(),
1997                dst.height(),
1998                tmp.fourcc(),
1999                Some(edgefirst_tensor::TensorMemory::Mem),
2000            )?;
2001            self.resize_flip_rotate(tmp, &mut tmp2, rotation, flip, crop)?;
2002
2003            // Final conversion to destination (zero-copy into dst)
2004            Self::convert_format_generic(&tmp2, dst)?;
2005        } else {
2006            // Direct conversion (already checked above, but handle edge cases)
2007            Self::convert_format_generic(tmp, dst)?;
2008        }
2009
2010        // Handle destination crop fill if needed
2011        if let (Some(dst_rect), Some(dst_color)) = (crop.dst_rect, crop.dst_color) {
2012            let full_rect = Rect {
2013                left: 0,
2014                top: 0,
2015                width: dst.width(),
2016                height: dst.height(),
2017            };
2018            if dst_rect != full_rect {
2019                Self::fill_image_outside_crop_generic(dst, dst_color, dst_rect)?;
2020            }
2021        }
2022
2023        Ok(())
2024    }
2025
2026    #[cfg(feature = "decoder")]
2027    fn render_to_image(
2028        &mut self,
2029        dst: &mut TensorImage,
2030        detect: &[DetectBox],
2031        segmentation: &[Segmentation],
2032    ) -> Result<()> {
2033        if !matches!(dst.fourcc(), RGBA | RGB) {
2034            return Err(crate::Error::NotSupported(
2035                "CPU image rendering only supports RGBA or RGB images".to_string(),
2036            ));
2037        }
2038
2039        let _timer = FunctionTimer::new("CPUProcessor::render_to_image");
2040
2041        let mut map = dst.tensor.map()?;
2042        let dst_slice = map.as_mut_slice();
2043
2044        self.render_box(dst, dst_slice, detect)?;
2045
2046        if segmentation.is_empty() {
2047            return Ok(());
2048        }
2049
2050        let is_modelpack = segmentation[0].segmentation.shape()[2] > 1;
2051
2052        if is_modelpack {
2053            self.render_modelpack_segmentation(dst, dst_slice, &segmentation[0])?;
2054        } else {
2055            for (seg, detect) in segmentation.iter().zip(detect) {
2056                self.render_yolo_segmentation(dst, dst_slice, seg, detect.label)?;
2057            }
2058        }
2059
2060        Ok(())
2061    }
2062
2063    #[cfg(feature = "decoder")]
2064    fn set_class_colors(&mut self, colors: &[[u8; 4]]) -> Result<()> {
2065        for (c, new_c) in self.colors.iter_mut().zip(colors.iter()) {
2066            *c = *new_c;
2067        }
2068        Ok(())
2069    }
2070}
2071
2072#[cfg(test)]
2073#[cfg_attr(coverage_nightly, coverage(off))]
2074mod cpu_tests {
2075
2076    use super::*;
2077    use crate::{CPUProcessor, Rotation, TensorImageRef, RGBA};
2078    use edgefirst_tensor::{Tensor, TensorMapTrait, TensorMemory};
2079    use image::buffer::ConvertBuffer;
2080
    /// Expands to the name of the enclosing function as a `&'static str`.
    ///
    /// Works by taking `std::any::type_name` of a local item `f` (which is
    /// `path::to::enclosing_fn::f`), then trimming the trailing `::f` and the
    /// leading module path.
    macro_rules! function {
        () => {{
            fn f() {}
            fn type_name_of<T>(_: T) -> &'static str {
                std::any::type_name::<T>()
            }
            let name = type_name_of(f);

            // Find and cut the rest of the path
            match &name[..name.len() - 3].rfind(':') {
                Some(pos) => &name[pos + 1..name.len() - 3],
                None => &name[..name.len() - 3],
            }
        }};
    }
2096
2097    fn compare_images_convert_to_grey(
2098        img1: &TensorImage,
2099        img2: &TensorImage,
2100        threshold: f64,
2101        name: &str,
2102    ) {
2103        assert_eq!(img1.height(), img2.height(), "Heights differ");
2104        assert_eq!(img1.width(), img2.width(), "Widths differ");
2105
2106        let mut img_rgb1 = TensorImage::new(img1.width(), img1.height(), RGBA, None).unwrap();
2107        let mut img_rgb2 = TensorImage::new(img1.width(), img1.height(), RGBA, None).unwrap();
2108        CPUProcessor::convert_format(img1, &mut img_rgb1).unwrap();
2109        CPUProcessor::convert_format(img2, &mut img_rgb2).unwrap();
2110
2111        let image1 = image::RgbaImage::from_vec(
2112            img_rgb1.width() as u32,
2113            img_rgb1.height() as u32,
2114            img_rgb1.tensor().map().unwrap().to_vec(),
2115        )
2116        .unwrap();
2117
2118        let image2 = image::RgbaImage::from_vec(
2119            img_rgb2.width() as u32,
2120            img_rgb2.height() as u32,
2121            img_rgb2.tensor().map().unwrap().to_vec(),
2122        )
2123        .unwrap();
2124
2125        let similarity = image_compare::gray_similarity_structure(
2126            &image_compare::Algorithm::RootMeanSquared,
2127            &image1.convert(),
2128            &image2.convert(),
2129        )
2130        .expect("Image Comparison failed");
2131        if similarity.score < threshold {
2132            // image1.save(format!("{name}_1.png"));
2133            // image2.save(format!("{name}_2.png"));
2134            similarity
2135                .image
2136                .to_color_map()
2137                .save(format!("{name}.png"))
2138                .unwrap();
2139            panic!(
2140                "{name}: converted image and target image have similarity score too low: {} < {}",
2141                similarity.score, threshold
2142            )
2143        }
2144    }
2145
2146    fn compare_images_convert_to_rgb(
2147        img1: &TensorImage,
2148        img2: &TensorImage,
2149        threshold: f64,
2150        name: &str,
2151    ) {
2152        assert_eq!(img1.height(), img2.height(), "Heights differ");
2153        assert_eq!(img1.width(), img2.width(), "Widths differ");
2154
2155        let mut img_rgb1 = TensorImage::new(img1.width(), img1.height(), RGB, None).unwrap();
2156        let mut img_rgb2 = TensorImage::new(img1.width(), img1.height(), RGB, None).unwrap();
2157        CPUProcessor::convert_format(img1, &mut img_rgb1).unwrap();
2158        CPUProcessor::convert_format(img2, &mut img_rgb2).unwrap();
2159
2160        let image1 = image::RgbImage::from_vec(
2161            img_rgb1.width() as u32,
2162            img_rgb1.height() as u32,
2163            img_rgb1.tensor().map().unwrap().to_vec(),
2164        )
2165        .unwrap();
2166
2167        let image2 = image::RgbImage::from_vec(
2168            img_rgb2.width() as u32,
2169            img_rgb2.height() as u32,
2170            img_rgb2.tensor().map().unwrap().to_vec(),
2171        )
2172        .unwrap();
2173
2174        let similarity = image_compare::rgb_similarity_structure(
2175            &image_compare::Algorithm::RootMeanSquared,
2176            &image1,
2177            &image2,
2178        )
2179        .expect("Image Comparison failed");
2180        if similarity.score < threshold {
2181            // image1.save(format!("{name}_1.png"));
2182            // image2.save(format!("{name}_2.png"));
2183            similarity
2184                .image
2185                .to_color_map()
2186                .save(format!("{name}.png"))
2187                .unwrap();
2188            panic!(
2189                "{name}: converted image and target image have similarity score too low: {} < {}",
2190                similarity.score, threshold
2191            )
2192        }
2193    }
2194
2195    fn load_bytes_to_tensor(
2196        width: usize,
2197        height: usize,
2198        fourcc: FourCharCode,
2199        memory: Option<TensorMemory>,
2200        bytes: &[u8],
2201    ) -> Result<TensorImage, Error> {
2202        log::debug!("Current function is {}", function!());
2203        let src = TensorImage::new(width, height, fourcc, memory)?;
2204        src.tensor().map()?.as_mut_slice()[0..bytes.len()].copy_from_slice(bytes);
2205        Ok(src)
2206    }
2207
    /// Loads a 1280x720 source and reference image from `testdata/`, converts
    /// the source to the reference's format with the default (bilinear) CPU
    /// processor, and asserts structural RGB similarity >= 0.99 against the
    /// reference. Expands to the body of a `#[test]` fn returning `Result`.
    macro_rules! generate_conversion_tests {
        (
        $src_fmt:ident,  $src_file:expr, $dst_fmt:ident, $dst_file:expr
    ) => {{
            // Load source
            let src = load_bytes_to_tensor(
                1280,
                720,
                $src_fmt,
                None,
                include_bytes!(concat!("../../../testdata/", $src_file)),
            )?;

            // Load destination reference
            let dst = load_bytes_to_tensor(
                1280,
                720,
                $dst_fmt,
                None,
                include_bytes!(concat!("../../../testdata/", $dst_file)),
            )?;

            let mut converter = CPUProcessor::default();

            let mut converted = TensorImage::new(src.width(), src.height(), dst.fourcc(), None)?;

            converter.convert(
                &src,
                &mut converted,
                Rotation::None,
                Flip::None,
                Crop::default(),
            )?;

            compare_images_convert_to_rgb(&dst, &converted, 0.99, function!());

            Ok(())
        }};
    }
2247
    /// Same as `generate_conversion_tests!` but compares in greyscale with a
    /// slightly looser threshold (0.985) — used for GREY sources, where
    /// chroma cannot round-trip.
    macro_rules! generate_conversion_tests_greyscale {
        (
        $src_fmt:ident,  $src_file:expr, $dst_fmt:ident, $dst_file:expr
    ) => {{
            // Load source
            let src = load_bytes_to_tensor(
                1280,
                720,
                $src_fmt,
                None,
                include_bytes!(concat!("../../../testdata/", $src_file)),
            )?;

            // Load destination reference
            let dst = load_bytes_to_tensor(
                1280,
                720,
                $dst_fmt,
                None,
                include_bytes!(concat!("../../../testdata/", $dst_file)),
            )?;

            let mut converter = CPUProcessor::default();

            let mut converted = TensorImage::new(src.width(), src.height(), dst.fourcc(), None)?;

            converter.convert(
                &src,
                &mut converted,
                Rotation::None,
                Flip::None,
                Crop::default(),
            )?;

            compare_images_convert_to_grey(&dst, &converted, 0.985, function!());

            Ok(())
        }};
    }
2287
2288    // let mut dsts = [yuyv, rgb, rgba, grey, nv16, planar_rgb, planar_rgba];
2289
    // --- YUYV source conversions ---

    #[test]
    fn test_cpu_yuyv_to_yuyv() -> Result<()> {
        generate_conversion_tests!(YUYV, "camera720p.yuyv", YUYV, "camera720p.yuyv")
    }

    #[test]
    fn test_cpu_yuyv_to_rgb() -> Result<()> {
        generate_conversion_tests!(YUYV, "camera720p.yuyv", RGB, "camera720p.rgb")
    }

    #[test]
    fn test_cpu_yuyv_to_rgba() -> Result<()> {
        generate_conversion_tests!(YUYV, "camera720p.yuyv", RGBA, "camera720p.rgba")
    }

    #[test]
    fn test_cpu_yuyv_to_grey() -> Result<()> {
        generate_conversion_tests!(YUYV, "camera720p.yuyv", GREY, "camera720p.y800")
    }

    #[test]
    fn test_cpu_yuyv_to_nv16() -> Result<()> {
        generate_conversion_tests!(YUYV, "camera720p.yuyv", NV16, "camera720p.nv16")
    }

    #[test]
    fn test_cpu_yuyv_to_planar_rgb() -> Result<()> {
        generate_conversion_tests!(YUYV, "camera720p.yuyv", PLANAR_RGB, "camera720p.8bps")
    }

    #[test]
    fn test_cpu_yuyv_to_planar_rgba() -> Result<()> {
        generate_conversion_tests!(YUYV, "camera720p.yuyv", PLANAR_RGBA, "camera720p.8bpa")
    }

    // --- RGB source conversions ---

    #[test]
    fn test_cpu_rgb_to_yuyv() -> Result<()> {
        generate_conversion_tests!(RGB, "camera720p.rgb", YUYV, "camera720p.yuyv")
    }

    #[test]
    fn test_cpu_rgb_to_rgb() -> Result<()> {
        generate_conversion_tests!(RGB, "camera720p.rgb", RGB, "camera720p.rgb")
    }

    #[test]
    fn test_cpu_rgb_to_rgba() -> Result<()> {
        generate_conversion_tests!(RGB, "camera720p.rgb", RGBA, "camera720p.rgba")
    }

    #[test]
    fn test_cpu_rgb_to_grey() -> Result<()> {
        generate_conversion_tests!(RGB, "camera720p.rgb", GREY, "camera720p.y800")
    }

    #[test]
    fn test_cpu_rgb_to_nv16() -> Result<()> {
        generate_conversion_tests!(RGB, "camera720p.rgb", NV16, "camera720p.nv16")
    }

    #[test]
    fn test_cpu_rgb_to_planar_rgb() -> Result<()> {
        generate_conversion_tests!(RGB, "camera720p.rgb", PLANAR_RGB, "camera720p.8bps")
    }

    #[test]
    fn test_cpu_rgb_to_planar_rgba() -> Result<()> {
        generate_conversion_tests!(RGB, "camera720p.rgb", PLANAR_RGBA, "camera720p.8bpa")
    }

    // --- RGBA source conversions ---

    #[test]
    fn test_cpu_rgba_to_yuyv() -> Result<()> {
        generate_conversion_tests!(RGBA, "camera720p.rgba", YUYV, "camera720p.yuyv")
    }

    #[test]
    fn test_cpu_rgba_to_rgb() -> Result<()> {
        generate_conversion_tests!(RGBA, "camera720p.rgba", RGB, "camera720p.rgb")
    }

    #[test]
    fn test_cpu_rgba_to_rgba() -> Result<()> {
        generate_conversion_tests!(RGBA, "camera720p.rgba", RGBA, "camera720p.rgba")
    }

    #[test]
    fn test_cpu_rgba_to_grey() -> Result<()> {
        generate_conversion_tests!(RGBA, "camera720p.rgba", GREY, "camera720p.y800")
    }

    #[test]
    fn test_cpu_rgba_to_nv16() -> Result<()> {
        generate_conversion_tests!(RGBA, "camera720p.rgba", NV16, "camera720p.nv16")
    }

    #[test]
    fn test_cpu_rgba_to_planar_rgb() -> Result<()> {
        generate_conversion_tests!(RGBA, "camera720p.rgba", PLANAR_RGB, "camera720p.8bps")
    }

    #[test]
    fn test_cpu_rgba_to_planar_rgba() -> Result<()> {
        generate_conversion_tests!(RGBA, "camera720p.rgba", PLANAR_RGBA, "camera720p.8bpa")
    }

    // --- NV12 source conversions ---

    #[test]
    fn test_cpu_nv12_to_rgb() -> Result<()> {
        generate_conversion_tests!(NV12, "camera720p.nv12", RGB, "camera720p.rgb")
    }

    #[test]
    fn test_cpu_nv12_to_yuyv() -> Result<()> {
        generate_conversion_tests!(NV12, "camera720p.nv12", YUYV, "camera720p.yuyv")
    }

    #[test]
    fn test_cpu_nv12_to_rgba() -> Result<()> {
        generate_conversion_tests!(NV12, "camera720p.nv12", RGBA, "camera720p.rgba")
    }

    #[test]
    fn test_cpu_nv12_to_grey() -> Result<()> {
        generate_conversion_tests!(NV12, "camera720p.nv12", GREY, "camera720p.y800")
    }

    #[test]
    fn test_cpu_nv12_to_nv16() -> Result<()> {
        generate_conversion_tests!(NV12, "camera720p.nv12", NV16, "camera720p.nv16")
    }

    #[test]
    fn test_cpu_nv12_to_planar_rgb() -> Result<()> {
        generate_conversion_tests!(NV12, "camera720p.nv12", PLANAR_RGB, "camera720p.8bps")
    }

    #[test]
    fn test_cpu_nv12_to_planar_rgba() -> Result<()> {
        generate_conversion_tests!(NV12, "camera720p.nv12", PLANAR_RGBA, "camera720p.8bpa")
    }

    // --- GREY source conversions (greyscale comparison) ---

    #[test]
    fn test_cpu_grey_to_yuyv() -> Result<()> {
        generate_conversion_tests_greyscale!(GREY, "camera720p.y800", YUYV, "camera720p.yuyv")
    }

    #[test]
    fn test_cpu_grey_to_rgb() -> Result<()> {
        generate_conversion_tests_greyscale!(GREY, "camera720p.y800", RGB, "camera720p.rgb")
    }

    #[test]
    fn test_cpu_grey_to_rgba() -> Result<()> {
        generate_conversion_tests_greyscale!(GREY, "camera720p.y800", RGBA, "camera720p.rgba")
    }

    #[test]
    fn test_cpu_grey_to_grey() -> Result<()> {
        generate_conversion_tests_greyscale!(GREY, "camera720p.y800", GREY, "camera720p.y800")
    }

    #[test]
    fn test_cpu_grey_to_nv16() -> Result<()> {
        generate_conversion_tests_greyscale!(GREY, "camera720p.y800", NV16, "camera720p.nv16")
    }

    #[test]
    fn test_cpu_grey_to_planar_rgb() -> Result<()> {
        generate_conversion_tests_greyscale!(GREY, "camera720p.y800", PLANAR_RGB, "camera720p.8bps")
    }

    #[test]
    fn test_cpu_grey_to_planar_rgba() -> Result<()> {
        generate_conversion_tests_greyscale!(
            GREY,
            "camera720p.y800",
            PLANAR_RGBA,
            "camera720p.8bpa"
        )
    }
2469
2470    #[test]
2471    fn test_cpu_nearest() -> Result<()> {
2472        // Load source
2473        let src = load_bytes_to_tensor(2, 1, RGB, None, &[0, 0, 0, 255, 255, 255])?;
2474
2475        let mut converter = CPUProcessor::new_nearest();
2476
2477        let mut converted = TensorImage::new(4, 1, RGB, None)?;
2478
2479        converter.convert(
2480            &src,
2481            &mut converted,
2482            Rotation::None,
2483            Flip::None,
2484            Crop::default(),
2485        )?;
2486
2487        assert_eq!(
2488            &converted.tensor().map()?.as_slice(),
2489            &[0, 0, 0, 0, 0, 0, 255, 255, 255, 255, 255, 255]
2490        );
2491
2492        Ok(())
2493    }
2494
    /// Rotates a 2x2 RGBA image 90 degrees clockwise while upscaling to 4x4,
    /// then spot-checks one pixel in each quadrant of the output.
    #[test]
    fn test_cpu_rotate_cw() -> Result<()> {
        // 2x2 source whose pixels are numbered 0..=3 row-major:
        //   0 1
        //   2 3
        let src = load_bytes_to_tensor(
            2,
            2,
            RGBA,
            None,
            &[0, 0, 0, 255, 1, 1, 1, 255, 2, 2, 2, 255, 3, 3, 3, 255],
        )?;

        let mut converter = CPUProcessor::default();

        let mut converted = TensorImage::new(4, 4, RGBA, None)?;

        converter.convert(
            &src,
            &mut converted,
            Rotation::Clockwise90,
            Flip::None,
            Crop::default(),
        )?;

        // After a clockwise rotation the layout becomes:
        //   2 0
        //   3 1
        // The 4x4 RGBA output has 16 bytes per row, 4 bytes per pixel; the
        // byte offsets below pick the four corner pixels of the output.
        assert_eq!(&converted.tensor().map()?.as_slice()[0..4], &[2, 2, 2, 255]);
        assert_eq!(
            &converted.tensor().map()?.as_slice()[12..16],
            &[0, 0, 0, 255]
        );
        assert_eq!(
            &converted.tensor().map()?.as_slice()[48..52],
            &[3, 3, 3, 255]
        );

        assert_eq!(
            &converted.tensor().map()?.as_slice()[60..64],
            &[1, 1, 1, 255]
        );

        Ok(())
    }
2535
2536    #[test]
2537    fn test_cpu_rotate_ccw() -> Result<()> {
2538        // Load source
2539        let src = load_bytes_to_tensor(
2540            2,
2541            2,
2542            RGBA,
2543            None,
2544            &[0, 0, 0, 255, 1, 1, 1, 255, 2, 2, 2, 255, 3, 3, 3, 255],
2545        )?;
2546
2547        let mut converter = CPUProcessor::default();
2548
2549        let mut converted = TensorImage::new(4, 4, RGBA, None)?;
2550
2551        converter.convert(
2552            &src,
2553            &mut converted,
2554            Rotation::CounterClockwise90,
2555            Flip::None,
2556            Crop::default(),
2557        )?;
2558
2559        assert_eq!(&converted.tensor().map()?.as_slice()[0..4], &[1, 1, 1, 255]);
2560        assert_eq!(
2561            &converted.tensor().map()?.as_slice()[12..16],
2562            &[3, 3, 3, 255]
2563        );
2564        assert_eq!(
2565            &converted.tensor().map()?.as_slice()[48..52],
2566            &[0, 0, 0, 255]
2567        );
2568
2569        assert_eq!(
2570            &converted.tensor().map()?.as_slice()[60..64],
2571            &[2, 2, 2, 255]
2572        );
2573
2574        Ok(())
2575    }
2576
2577    #[test]
2578    fn test_cpu_rotate_180() -> Result<()> {
2579        // Load source
2580        let src = load_bytes_to_tensor(
2581            2,
2582            2,
2583            RGBA,
2584            None,
2585            &[0, 0, 0, 255, 1, 1, 1, 255, 2, 2, 2, 255, 3, 3, 3, 255],
2586        )?;
2587
2588        let mut converter = CPUProcessor::default();
2589
2590        let mut converted = TensorImage::new(4, 4, RGBA, None)?;
2591
2592        converter.convert(
2593            &src,
2594            &mut converted,
2595            Rotation::Rotate180,
2596            Flip::None,
2597            Crop::default(),
2598        )?;
2599
2600        assert_eq!(&converted.tensor().map()?.as_slice()[0..4], &[3, 3, 3, 255]);
2601        assert_eq!(
2602            &converted.tensor().map()?.as_slice()[12..16],
2603            &[2, 2, 2, 255]
2604        );
2605        assert_eq!(
2606            &converted.tensor().map()?.as_slice()[48..52],
2607            &[1, 1, 1, 255]
2608        );
2609
2610        assert_eq!(
2611            &converted.tensor().map()?.as_slice()[60..64],
2612            &[0, 0, 0, 255]
2613        );
2614
2615        Ok(())
2616    }
2617
2618    #[test]
2619    fn test_cpu_flip_v() -> Result<()> {
2620        // Load source
2621        let src = load_bytes_to_tensor(
2622            2,
2623            2,
2624            RGBA,
2625            None,
2626            &[0, 0, 0, 255, 1, 1, 1, 255, 2, 2, 2, 255, 3, 3, 3, 255],
2627        )?;
2628
2629        let mut converter = CPUProcessor::default();
2630
2631        let mut converted = TensorImage::new(4, 4, RGBA, None)?;
2632
2633        converter.convert(
2634            &src,
2635            &mut converted,
2636            Rotation::None,
2637            Flip::Vertical,
2638            Crop::default(),
2639        )?;
2640
2641        assert_eq!(&converted.tensor().map()?.as_slice()[0..4], &[2, 2, 2, 255]);
2642        assert_eq!(
2643            &converted.tensor().map()?.as_slice()[12..16],
2644            &[3, 3, 3, 255]
2645        );
2646        assert_eq!(
2647            &converted.tensor().map()?.as_slice()[48..52],
2648            &[0, 0, 0, 255]
2649        );
2650
2651        assert_eq!(
2652            &converted.tensor().map()?.as_slice()[60..64],
2653            &[1, 1, 1, 255]
2654        );
2655
2656        Ok(())
2657    }
2658
2659    #[test]
2660    fn test_cpu_flip_h() -> Result<()> {
2661        // Load source
2662        let src = load_bytes_to_tensor(
2663            2,
2664            2,
2665            RGBA,
2666            None,
2667            &[0, 0, 0, 255, 1, 1, 1, 255, 2, 2, 2, 255, 3, 3, 3, 255],
2668        )?;
2669
2670        let mut converter = CPUProcessor::default();
2671
2672        let mut converted = TensorImage::new(4, 4, RGBA, None)?;
2673
2674        converter.convert(
2675            &src,
2676            &mut converted,
2677            Rotation::None,
2678            Flip::Horizontal,
2679            Crop::default(),
2680        )?;
2681
2682        assert_eq!(&converted.tensor().map()?.as_slice()[0..4], &[1, 1, 1, 255]);
2683        assert_eq!(
2684            &converted.tensor().map()?.as_slice()[12..16],
2685            &[0, 0, 0, 255]
2686        );
2687        assert_eq!(
2688            &converted.tensor().map()?.as_slice()[48..52],
2689            &[3, 3, 3, 255]
2690        );
2691
2692        assert_eq!(
2693            &converted.tensor().map()?.as_slice()[60..64],
2694            &[2, 2, 2, 255]
2695        );
2696
2697        Ok(())
2698    }
2699
2700    #[test]
2701    fn test_cpu_src_crop() -> Result<()> {
2702        // Load source
2703        let src = load_bytes_to_tensor(2, 2, GREY, None, &[10, 20, 30, 40])?;
2704
2705        let mut converter = CPUProcessor::default();
2706
2707        let mut converted = TensorImage::new(2, 2, RGBA, None)?;
2708
2709        converter.convert(
2710            &src,
2711            &mut converted,
2712            Rotation::None,
2713            Flip::None,
2714            Crop::new().with_src_rect(Some(Rect::new(0, 0, 1, 2))),
2715        )?;
2716
2717        assert_eq!(
2718            converted.tensor().map()?.as_slice(),
2719            &[10, 10, 10, 255, 13, 13, 13, 255, 30, 30, 30, 255, 33, 33, 33, 255]
2720        );
2721        Ok(())
2722    }
2723
2724    #[test]
2725    fn test_cpu_dst_crop() -> Result<()> {
2726        // Load source
2727        let src = load_bytes_to_tensor(2, 2, GREY, None, &[2, 4, 6, 8])?;
2728
2729        let mut converter = CPUProcessor::default();
2730
2731        let mut converted =
2732            load_bytes_to_tensor(2, 2, YUYV, None, &[200, 128, 200, 128, 200, 128, 200, 128])?;
2733
2734        converter.convert(
2735            &src,
2736            &mut converted,
2737            Rotation::None,
2738            Flip::None,
2739            Crop::new().with_dst_rect(Some(Rect::new(0, 0, 2, 1))),
2740        )?;
2741
2742        assert_eq!(
2743            converted.tensor().map()?.as_slice(),
2744            &[20, 128, 21, 128, 200, 128, 200, 128]
2745        );
2746        Ok(())
2747    }
2748
2749    #[test]
2750    fn test_cpu_fill_rgba() -> Result<()> {
2751        // Load source
2752        let src = load_bytes_to_tensor(1, 1, RGBA, None, &[3, 3, 3, 255])?;
2753
2754        let mut converter = CPUProcessor::default();
2755
2756        let mut converted = TensorImage::new(2, 2, RGBA, None)?;
2757
2758        converter.convert(
2759            &src,
2760            &mut converted,
2761            Rotation::None,
2762            Flip::None,
2763            Crop {
2764                src_rect: None,
2765                dst_rect: Some(Rect {
2766                    left: 1,
2767                    top: 1,
2768                    width: 1,
2769                    height: 1,
2770                }),
2771                dst_color: Some([255, 0, 0, 255]),
2772            },
2773        )?;
2774
2775        assert_eq!(
2776            converted.tensor().map()?.as_slice(),
2777            &[255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 3, 3, 3, 255]
2778        );
2779        Ok(())
2780    }
2781
2782    #[test]
2783    fn test_cpu_fill_yuyv() -> Result<()> {
2784        // Load source
2785        let src = load_bytes_to_tensor(2, 1, RGBA, None, &[3, 3, 3, 255, 3, 3, 3, 255])?;
2786
2787        let mut converter = CPUProcessor::default();
2788
2789        let mut converted = TensorImage::new(2, 3, YUYV, None)?;
2790
2791        converter.convert(
2792            &src,
2793            &mut converted,
2794            Rotation::None,
2795            Flip::None,
2796            Crop {
2797                src_rect: None,
2798                dst_rect: Some(Rect {
2799                    left: 0,
2800                    top: 1,
2801                    width: 2,
2802                    height: 1,
2803                }),
2804                dst_color: Some([255, 0, 0, 255]),
2805            },
2806        )?;
2807
2808        assert_eq!(
2809            converted.tensor().map()?.as_slice(),
2810            &[63, 102, 63, 240, 19, 128, 19, 128, 63, 102, 63, 240]
2811        );
2812        Ok(())
2813    }
2814
2815    #[test]
2816    fn test_cpu_fill_grey() -> Result<()> {
2817        // Load source
2818        let src = load_bytes_to_tensor(2, 1, RGBA, None, &[3, 3, 3, 255, 3, 3, 3, 255])?;
2819
2820        let mut converter = CPUProcessor::default();
2821
2822        let mut converted = TensorImage::new(2, 3, GREY, None)?;
2823
2824        converter.convert(
2825            &src,
2826            &mut converted,
2827            Rotation::None,
2828            Flip::None,
2829            Crop {
2830                src_rect: None,
2831                dst_rect: Some(Rect {
2832                    left: 0,
2833                    top: 1,
2834                    width: 2,
2835                    height: 1,
2836                }),
2837                dst_color: Some([200, 200, 200, 255]),
2838            },
2839        )?;
2840
2841        assert_eq!(
2842            converted.tensor().map()?.as_slice(),
2843            &[200, 200, 3, 3, 200, 200]
2844        );
2845        Ok(())
2846    }
2847
2848    #[test]
2849    #[cfg(feature = "decoder")]
2850    fn test_segmentation() {
2851        use edgefirst_decoder::Segmentation;
2852        use ndarray::Array3;
2853
2854        let mut image = TensorImage::load(
2855            include_bytes!("../../../testdata/giraffe.jpg"),
2856            Some(RGBA),
2857            None,
2858        )
2859        .unwrap();
2860
2861        let mut segmentation = Array3::from_shape_vec(
2862            (2, 160, 160),
2863            include_bytes!("../../../testdata/modelpack_seg_2x160x160.bin").to_vec(),
2864        )
2865        .unwrap();
2866        segmentation.swap_axes(0, 1);
2867        segmentation.swap_axes(1, 2);
2868        let segmentation = segmentation.as_standard_layout().to_owned();
2869
2870        let seg = Segmentation {
2871            segmentation,
2872            xmin: 0.0,
2873            ymin: 0.0,
2874            xmax: 1.0,
2875            ymax: 1.0,
2876        };
2877
2878        let mut renderer = CPUProcessor::new();
2879        renderer.render_to_image(&mut image, &[], &[seg]).unwrap();
2880
2881        image.save_jpeg("test_segmentation.jpg", 80).unwrap();
2882    }
2883
2884    #[test]
2885    #[cfg(feature = "decoder")]
2886    fn test_segmentation_yolo() {
2887        use edgefirst_decoder::Segmentation;
2888        use ndarray::Array3;
2889
2890        let mut image = TensorImage::load(
2891            include_bytes!("../../../testdata/giraffe.jpg"),
2892            Some(RGBA),
2893            None,
2894        )
2895        .unwrap();
2896
2897        let segmentation = Array3::from_shape_vec(
2898            (76, 55, 1),
2899            include_bytes!("../../../testdata/yolov8_seg_crop_76x55.bin").to_vec(),
2900        )
2901        .unwrap();
2902
2903        let detect = DetectBox {
2904            bbox: [0.59375, 0.25, 0.9375, 0.725].into(),
2905            score: 0.99,
2906            label: 1,
2907        };
2908
2909        let seg = Segmentation {
2910            segmentation,
2911            xmin: 0.59375,
2912            ymin: 0.25,
2913            xmax: 0.9375,
2914            ymax: 0.725,
2915        };
2916
2917        let mut renderer = CPUProcessor::new();
2918        renderer
2919            .set_class_colors(&[[255, 255, 0, 233], [128, 128, 255, 100]])
2920            .unwrap();
2921        assert_eq!(renderer.colors[1], [128, 128, 255, 100]);
2922        renderer
2923            .render_to_image(&mut image, &[detect], &[seg])
2924            .unwrap();
2925        let expected = TensorImage::load(
2926            include_bytes!("../../../testdata/output_render_cpu.jpg"),
2927            Some(RGBA),
2928            None,
2929        )
2930        .unwrap();
2931        compare_images_convert_to_rgb(&image, &expected, 0.99, function!());
2932    }
2933
2934    // =========================================================================
2935    // Generic Conversion Tests (TensorImageRef support)
2936    // =========================================================================
2937
    #[test]
    fn test_convert_rgb_to_planar_rgb_generic() {
        // Interleaved RGB -> planar RGB through the generic (TensorImageRef)
        // conversion path.
        // Create RGB source image
        let mut src = TensorImage::new(4, 4, RGB, None).unwrap();
        {
            let mut map = src.tensor_mut().map().unwrap();
            let data = map.as_mut_slice();
            // Fill with pattern: pixel i = [i*10, i*10+1, i*10+2], so
            // pixel 0 = [0, 1, 2], pixel 1 = [10, 11, 12], etc.
            for i in 0..16 {
                data[i * 3] = (i * 10) as u8;
                data[i * 3 + 1] = (i * 10 + 1) as u8;
                data[i * 3 + 2] = (i * 10 + 2) as u8;
            }
        }

        // Create planar RGB destination using TensorImageRef
        let mut tensor = Tensor::<u8>::new(&[3, 4, 4], None, None).unwrap();
        let mut dst = TensorImageRef::from_borrowed_tensor(&mut tensor, PLANAR_RGB).unwrap();

        CPUProcessor::convert_format_generic(&src, &mut dst).unwrap();

        // Verify the conversion - check first few pixels of each plane
        let map = dst.tensor().map().unwrap();
        let data = map.as_slice();

        // R plane starts at 0, G at 16, B at 32 (4x4 = 16 pixels per plane)
        assert_eq!(data[0], 0); // R of pixel 0
        assert_eq!(data[16], 1); // G of pixel 0
        assert_eq!(data[32], 2); // B of pixel 0

        assert_eq!(data[1], 10); // R of pixel 1
        assert_eq!(data[17], 11); // G of pixel 1
        assert_eq!(data[33], 12); // B of pixel 1
    }
2972
2973    #[test]
2974    fn test_convert_rgba_to_planar_rgb_generic() {
2975        // Create RGBA source image
2976        let mut src = TensorImage::new(4, 4, RGBA, None).unwrap();
2977        {
2978            let mut map = src.tensor_mut().map().unwrap();
2979            let data = map.as_mut_slice();
2980            // Fill with pattern
2981            for i in 0..16 {
2982                data[i * 4] = (i * 10) as u8; // R
2983                data[i * 4 + 1] = (i * 10 + 1) as u8; // G
2984                data[i * 4 + 2] = (i * 10 + 2) as u8; // B
2985                data[i * 4 + 3] = 255; // A (ignored)
2986            }
2987        }
2988
2989        // Create planar RGB destination
2990        let mut tensor = Tensor::<u8>::new(&[3, 4, 4], None, None).unwrap();
2991        let mut dst = TensorImageRef::from_borrowed_tensor(&mut tensor, PLANAR_RGB).unwrap();
2992
2993        CPUProcessor::convert_format_generic(&src, &mut dst).unwrap();
2994
2995        // Verify the conversion
2996        let map = dst.tensor().map().unwrap();
2997        let data = map.as_slice();
2998
2999        assert_eq!(data[0], 0); // R of pixel 0
3000        assert_eq!(data[16], 1); // G of pixel 0
3001        assert_eq!(data[32], 2); // B of pixel 0
3002    }
3003
3004    #[test]
3005    fn test_copy_image_generic_same_format() {
3006        // Create source image with data
3007        let mut src = TensorImage::new(4, 4, RGB, None).unwrap();
3008        {
3009            let mut map = src.tensor_mut().map().unwrap();
3010            let data = map.as_mut_slice();
3011            for (i, byte) in data.iter_mut().enumerate() {
3012                *byte = (i % 256) as u8;
3013            }
3014        }
3015
3016        // Create destination tensor
3017        let mut tensor = Tensor::<u8>::new(&[4, 4, 3], None, None).unwrap();
3018        let mut dst = TensorImageRef::from_borrowed_tensor(&mut tensor, RGB).unwrap();
3019
3020        CPUProcessor::convert_format_generic(&src, &mut dst).unwrap();
3021
3022        // Verify data was copied
3023        let src_map = src.tensor().map().unwrap();
3024        let dst_map = dst.tensor().map().unwrap();
3025        assert_eq!(src_map.as_slice(), dst_map.as_slice());
3026    }
3027
3028    #[test]
3029    fn test_convert_format_generic_unsupported() {
3030        // Try unsupported conversion (NV12 to PLANAR_RGB)
3031        let src = TensorImage::new(8, 8, NV12, None).unwrap();
3032        let mut tensor = Tensor::<u8>::new(&[3, 8, 8], None, None).unwrap();
3033        let mut dst = TensorImageRef::from_borrowed_tensor(&mut tensor, PLANAR_RGB).unwrap();
3034
3035        let result = CPUProcessor::convert_format_generic(&src, &mut dst);
3036        assert!(result.is_err());
3037        assert!(matches!(result, Err(Error::NotSupported(_))));
3038    }
3039
3040    #[test]
3041    fn test_fill_image_outside_crop_generic_rgba() {
3042        let mut tensor = Tensor::<u8>::new(&[4, 4, 4], None, None).unwrap();
3043        // Initialize to zeros
3044        tensor.map().unwrap().as_mut_slice().fill(0);
3045
3046        let mut dst = TensorImageRef::from_borrowed_tensor(&mut tensor, RGBA).unwrap();
3047
3048        // Fill outside a 2x2 crop in the center with red
3049        let crop = Rect::new(1, 1, 2, 2);
3050        CPUProcessor::fill_image_outside_crop_generic(&mut dst, [255, 0, 0, 255], crop).unwrap();
3051
3052        let map = dst.tensor().map().unwrap();
3053        let data = map.as_slice();
3054
3055        // Top-left corner should be filled (red)
3056        assert_eq!(&data[0..4], &[255, 0, 0, 255]);
3057
3058        // Center pixel (1,1) should still be zero (inside crop)
3059        // row=1, col=1, width=4, bytes_per_pixel=4 -> offset = (1*4 + 1) * 4 = 20
3060        let center_offset = 20;
3061        assert_eq!(&data[center_offset..center_offset + 4], &[0, 0, 0, 0]);
3062    }
3063
3064    #[test]
3065    fn test_fill_image_outside_crop_generic_rgb() {
3066        let mut tensor = Tensor::<u8>::new(&[4, 4, 3], None, None).unwrap();
3067        tensor.map().unwrap().as_mut_slice().fill(0);
3068
3069        let mut dst = TensorImageRef::from_borrowed_tensor(&mut tensor, RGB).unwrap();
3070
3071        let crop = Rect::new(1, 1, 2, 2);
3072        CPUProcessor::fill_image_outside_crop_generic(&mut dst, [0, 255, 0, 255], crop).unwrap();
3073
3074        let map = dst.tensor().map().unwrap();
3075        let data = map.as_slice();
3076
3077        // Top-left corner should be green
3078        assert_eq!(&data[0..3], &[0, 255, 0]);
3079
3080        // Center pixel (1,1): row=1, col=1, width=4, bytes=3 -> offset = (1*4 + 1) * 3
3081        // = 15
3082        let center_offset = 15;
3083        assert_eq!(&data[center_offset..center_offset + 3], &[0, 0, 0]);
3084    }
3085
3086    #[test]
3087    fn test_fill_image_outside_crop_generic_planar_rgb() {
3088        let mut tensor = Tensor::<u8>::new(&[3, 4, 4], None, None).unwrap();
3089        tensor.map().unwrap().as_mut_slice().fill(0);
3090
3091        let mut dst = TensorImageRef::from_borrowed_tensor(&mut tensor, PLANAR_RGB).unwrap();
3092
3093        let crop = Rect::new(1, 1, 2, 2);
3094        CPUProcessor::fill_image_outside_crop_generic(&mut dst, [128, 64, 32, 255], crop).unwrap();
3095
3096        let map = dst.tensor().map().unwrap();
3097        let data = map.as_slice();
3098
3099        // For planar: R plane is [0..16], G plane is [16..32], B plane is [32..48]
3100        // Top-left pixel (0,0) should have R=128, G=64, B=32
3101        assert_eq!(data[0], 128); // R plane, pixel 0
3102        assert_eq!(data[16], 64); // G plane, pixel 0
3103        assert_eq!(data[32], 32); // B plane, pixel 0
3104
3105        // Center pixel (1,1): row=1, col=1, width=4 -> index = 1*4 + 1 = 5
3106        let center_idx = 5;
3107        assert_eq!(data[center_idx], 0); // R
3108        assert_eq!(data[16 + center_idx], 0); // G
3109        assert_eq!(data[32 + center_idx], 0); // B
3110    }
3111}