#![cfg_attr(coverage_nightly, feature(coverage_attribute))]
pub const GPU_DMA_BUF_PITCH_ALIGNMENT_BYTES: usize = 64;
/// Rounds `width` (in pixels) up so that a row of `width * bpp` bytes lands
/// on a [`GPU_DMA_BUF_PITCH_ALIGNMENT_BYTES`]-aligned pitch.
///
/// Falls back to returning `width` unchanged when alignment cannot be
/// computed (zero inputs or arithmetic overflow), warning on overflow.
pub fn align_width_for_gpu_pitch(width: usize, bpp: usize) -> usize {
    if width == 0 || bpp == 0 {
        return width;
    }
    // Smallest byte count that is a multiple of both the pitch alignment and
    // the pixel size; dividing by bpp yields the pixel-count alignment.
    let lcm_alignment = match checked_num_integer_lcm(GPU_DMA_BUF_PITCH_ALIGNMENT_BYTES, bpp) {
        Some(v) => v,
        None => {
            log::warn!(
                "align_width_for_gpu_pitch: lcm({GPU_DMA_BUF_PITCH_ALIGNMENT_BYTES}, {bpp}) \
                overflows usize, returning unaligned width {width}"
            );
            return width;
        }
    };
    if lcm_alignment == 0 {
        return width;
    }
    debug_assert_eq!(lcm_alignment % bpp, 0);
    let width_alignment = lcm_alignment / bpp;
    if width_alignment == 0 {
        return width;
    }
    let remainder = width % width_alignment;
    if remainder == 0 {
        return width;
    }
    // Round up, keeping the original width if the padded value overflows.
    let pad = width_alignment - remainder;
    if let Some(aligned) = width.checked_add(pad) {
        aligned
    } else {
        log::warn!(
            "align_width_for_gpu_pitch: width {width} + pad {pad} overflows usize, \
            returning unaligned (caller should use a smaller width or pre-aligned size)"
        );
        width
    }
}
/// Rounds `min_pitch_bytes` up to the next multiple of
/// [`GPU_DMA_BUF_PITCH_ALIGNMENT_BYTES`].
///
/// Returns `Some(0)` for a zero input (trivially aligned) and `None` when
/// rounding up would overflow `usize` — the same contract as the previous
/// hand-rolled remainder arithmetic, now via the standard library.
#[cfg(target_os = "linux")]
pub(crate) fn align_pitch_bytes_to_gpu_alignment(min_pitch_bytes: usize) -> Option<usize> {
    // `checked_next_multiple_of` covers the zero and already-aligned cases
    // and reports overflow as `None`.
    min_pitch_bytes.checked_next_multiple_of(GPU_DMA_BUF_PITCH_ALIGNMENT_BYTES)
}
/// Least common multiple of `a` and `b`, or `None` when the result would
/// overflow `usize`. By convention `lcm(x, 0) == lcm(0, x) == 0`.
fn checked_num_integer_lcm(a: usize, b: usize) -> Option<usize> {
    match (a, b) {
        (0, _) | (_, 0) => Some(0),
        // lcm(a, b) = a / gcd(a, b) * b — divide first to delay overflow.
        (a, b) => (a / num_integer_gcd(a, b)).checked_mul(b),
    }
}
/// Greatest common divisor via the iterative Euclidean algorithm.
/// `gcd(a, 0) == a`, hence `gcd(0, 0) == 0`.
fn num_integer_gcd(a: usize, b: usize) -> usize {
    let (mut x, mut y) = (a, b);
    while y != 0 {
        let rem = x % y;
        x = y;
        y = rem;
    }
    x
}
/// Bytes per pixel of the primary (first) plane of `format`, given an
/// element size of `elem` bytes; `None` for layouts without a primary-plane
/// pixel size.
pub fn primary_plane_bpp(format: PixelFormat, elem: usize) -> Option<usize> {
    use edgefirst_tensor::PixelLayout;
    match format.layout() {
        // Packed: all channels interleave into one plane.
        PixelLayout::Packed => Some(elem * format.channels()),
        // Planar / semi-planar: the first plane carries one component per pixel.
        PixelLayout::Planar | PixelLayout::SemiPlanar => Some(elem),
        _ => None,
    }
}
/// Padded DMA row pitch in bytes for a packed `fmt` image that is `width`
/// pixels wide, or `None` when no padding is required or the tensor will
/// not be DMA-backed.
#[cfg(target_os = "linux")]
pub(crate) fn padded_dma_pitch_for(
    fmt: PixelFormat,
    width: usize,
    memory: &Option<TensorMemory>,
) -> Option<usize> {
    // DMA is in play when explicitly requested, or implied (no request and
    // DMA is available on this system).
    let dma_in_play = matches!(memory, Some(TensorMemory::Dma))
        || (memory.is_none() && edgefirst_tensor::is_dma_available());
    if !dma_in_play || fmt.layout() != PixelLayout::Packed {
        return None;
    }
    let natural = width.checked_mul(primary_plane_bpp(fmt, 1)?)?;
    // Only report a pitch when alignment actually adds padding.
    align_pitch_bytes_to_gpu_alignment(natural).filter(|&aligned| aligned > natural)
}
/// Copies a tightly packed image `src` into `dst` row by row, honoring
/// `dst`'s effective (possibly padded) row stride. Width, height, and
/// pixel format must match; buffer sizes are validated before copying.
///
/// NOTE(review): `src` is assumed tightly packed — `src.effective_row_stride()`
/// is never consulted, so a stride-padded source would be copied wrong.
/// Confirm callers only pass packed `Mem` tensors (see `rotate_flip_to_dyn`).
#[cfg(target_os = "linux")]
pub(crate) fn copy_packed_to_padded_dma(src: &Tensor<u8>, dst: &mut Tensor<u8>) -> Result<()> {
    let width = dst.width().ok_or(Error::NotAnImage)?;
    let height = dst.height().ok_or(Error::NotAnImage)?;
    let fmt = dst.format().ok_or(Error::NotAnImage)?;
    let src_width = src.width().ok_or(Error::NotAnImage)?;
    let src_height = src.height().ok_or(Error::NotAnImage)?;
    let src_fmt = src.format().ok_or(Error::NotAnImage)?;
    if src_width != width || src_height != height || src_fmt != fmt {
        return Err(Error::Internal(format!(
            "copy_packed_to_padded_dma: src and dst image metadata must match \
            (src: {src_width}x{src_height} {src_fmt:?}, dst: {width}x{height} {fmt:?})"
        )));
    }
    // Bytes per pixel of the packed plane (1-byte elements).
    let bpp = primary_plane_bpp(fmt, 1).ok_or_else(|| {
        Error::NotSupported(format!(
            "copy_packed_to_padded_dma: unknown bpp for {fmt:?}"
        ))
    })?;
    // Natural (unpadded) row length in bytes.
    let natural = width.checked_mul(bpp).ok_or_else(|| {
        Error::Internal(format!(
            "copy_packed_to_padded_dma: width {width} × bpp {bpp} overflows"
        ))
    })?;
    let dst_stride = dst.effective_row_stride().ok_or_else(|| {
        Error::Internal("copy_packed_to_padded_dma: dst has no effective row stride".into())
    })?;
    let src_map = src.map()?;
    let src_bytes: &[u8] = &src_map;
    let mut dst_map = dst.map()?;
    let dst_bytes: &mut [u8] = &mut dst_map;
    // Validate both buffers up front so the row loop below cannot panic on
    // out-of-bounds slicing.
    if src_bytes.len() < natural.saturating_mul(height) {
        return Err(Error::Internal(format!(
            "copy_packed_to_padded_dma: src has {} bytes, need {} ({}x{} @ {} bpp)",
            src_bytes.len(),
            natural.saturating_mul(height),
            width,
            height,
            bpp,
        )));
    }
    if dst_bytes.len() < dst_stride.saturating_mul(height) {
        return Err(Error::Internal(format!(
            "copy_packed_to_padded_dma: dst has {} bytes, need {} ({} stride × {} rows)",
            dst_bytes.len(),
            dst_stride.saturating_mul(height),
            dst_stride,
            height,
        )));
    }
    // Copy each packed row into the (wider) strided destination row; bytes
    // past `natural` in each dst row are left untouched.
    for row in 0..height {
        let s = row * natural;
        let d = row * dst_stride;
        dst_bytes[d..d + natural].copy_from_slice(&src_bytes[s..s + natural]);
    }
    Ok(())
}
use edgefirst_decoder::{DetectBox, ProtoData, Segmentation};
use edgefirst_tensor::{
DType, PixelFormat, PixelLayout, Tensor, TensorDyn, TensorMemory, TensorTrait as _,
};
use enum_dispatch::enum_dispatch;
use std::{fmt::Display, time::Instant};
use zune_jpeg::{
zune_core::{colorspace::ColorSpace, options::DecoderOptions},
JpegDecoder,
};
use zune_png::PngDecoder;
pub use cpu::CPUProcessor;
pub use error::{Error, Result};
#[cfg(target_os = "linux")]
pub use g2d::G2DProcessor;
#[cfg(target_os = "linux")]
#[cfg(feature = "opengl")]
pub use opengl_headless::GLProcessorThreaded;
#[cfg(target_os = "linux")]
#[cfg(feature = "opengl")]
pub use opengl_headless::Int8InterpolationMode;
#[cfg(target_os = "linux")]
#[cfg(feature = "opengl")]
pub use opengl_headless::{probe_egl_displays, EglDisplayInfo, EglDisplayKind};
mod cpu;
mod error;
mod g2d;
#[path = "gl/mod.rs"]
mod opengl_headless;
fn rotate_flip_to_dyn(
src: &Tensor<u8>,
src_fmt: PixelFormat,
rotation: Rotation,
flip: Flip,
memory: Option<TensorMemory>,
) -> Result<TensorDyn, Error> {
let src_w = src.width().unwrap();
let src_h = src.height().unwrap();
let channels = src_fmt.channels();
let (dst_w, dst_h) = match rotation {
Rotation::None | Rotation::Rotate180 => (src_w, src_h),
Rotation::Clockwise90 | Rotation::CounterClockwise90 => (src_h, src_w),
};
#[cfg(target_os = "linux")]
if let Some(aligned_pitch) = padded_dma_pitch_for(src_fmt, dst_w, &memory) {
let tmp = Tensor::<u8>::image(dst_w, dst_h, src_fmt, Some(TensorMemory::Mem))?;
let src_map = src.map()?;
let mut tmp_map = tmp.map()?;
CPUProcessor::flip_rotate_ndarray_pf(
&src_map,
&mut tmp_map,
dst_w,
dst_h,
channels,
rotation,
flip,
)?;
drop(tmp_map);
drop(src_map);
let mut dma = Tensor::<u8>::image_with_stride(
dst_w,
dst_h,
src_fmt,
aligned_pitch,
Some(TensorMemory::Dma),
)?;
copy_packed_to_padded_dma(&tmp, &mut dma)?;
return Ok(TensorDyn::from(dma));
}
let dst = Tensor::<u8>::image(dst_w, dst_h, src_fmt, memory)?;
let src_map = src.map()?;
let mut dst_map = dst.map()?;
CPUProcessor::flip_rotate_ndarray_pf(
&src_map,
&mut dst_map,
dst_w,
dst_h,
channels,
rotation,
flip,
)?;
drop(dst_map);
drop(src_map);
Ok(TensorDyn::from(dst))
}
/// Rotation applied during conversion; discriminants count 90° steps
/// clockwise.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Rotation {
    None = 0,
    Clockwise90 = 1,
    Rotate180 = 2,
    CounterClockwise90 = 3,
}
impl Rotation {
    /// Converts a clockwise angle in degrees (reduced modulo 360) into a
    /// [`Rotation`].
    ///
    /// # Panics
    /// Panics when the angle is not a multiple of 90.
    pub fn from_degrees_clockwise(angle: usize) -> Rotation {
        let normalized = angle.rem_euclid(360);
        if normalized % 90 != 0 {
            panic!("rotation angle is not a multiple of 90");
        }
        // normalized / 90 is one of 0..=3 here.
        match normalized / 90 {
            0 => Rotation::None,
            1 => Rotation::Clockwise90,
            2 => Rotation::Rotate180,
            _ => Rotation::CounterClockwise90,
        }
    }
}
/// Mirror operation applied during conversion.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Flip {
    None = 0,
    Vertical = 1,
    Horizontal = 2,
}
/// How a mask/box color is selected for each detection; see
/// [`ColorMode::index`].
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)]
pub enum ColorMode {
    /// Color by class label (default).
    #[default]
    Class,
    /// Color by per-frame detection index.
    Instance,
    /// Color by detection index (track-stable coloring is up to the caller).
    Track,
}
impl ColorMode {
    /// Color-table index for one detection: the class `label` in `Class`
    /// mode, otherwise the per-detection index `idx`.
    #[inline]
    pub fn index(self, idx: usize, label: usize) -> usize {
        if self == ColorMode::Class {
            label
        } else {
            idx
        }
    }
}
/// Output resolution when materializing proto masks; see
/// `ImageProcessor::materialize_masks`.
#[derive(Debug, Default, Clone, Copy, PartialEq, Eq)]
pub enum MaskResolution {
    /// Keep the prototype tensor's native resolution (default).
    #[default]
    Proto,
    /// Scale each mask to the given size.
    Scaled {
        width: u32,
        height: u32,
    },
}
/// Options controlling how decoded masks are composited onto a destination
/// image. Build with the `with_*` methods.
#[derive(Debug, Clone, Copy)]
pub struct MaskOverlay<'a> {
    /// Optional image composited behind the masks; must not alias `dst`
    /// (checked by the draw methods).
    pub background: Option<&'a TensorDyn>,
    /// Mask opacity; clamped to `[0.0, 1.0]` by `with_opacity`.
    pub opacity: f32,
    /// Normalized `[x0, y0, x1, y1]` letterbox region used to unmap boxes
    /// back into full-image coordinates.
    pub letterbox: Option<[f32; 4]>,
    /// How per-detection colors are chosen.
    pub color_mode: ColorMode,
}
impl Default for MaskOverlay<'_> {
    /// No background, fully opaque masks, no letterbox, class coloring.
    fn default() -> Self {
        Self {
            background: None,
            opacity: 1.0,
            letterbox: None,
            color_mode: ColorMode::Class,
        }
    }
}
impl<'a> MaskOverlay<'a> {
    /// Equivalent to [`MaskOverlay::default`].
    pub fn new() -> Self {
        Default::default()
    }
    /// Composites the masks over `bg` instead of the current `dst` content.
    pub fn with_background(mut self, bg: &'a TensorDyn) -> Self {
        self.background = Some(bg);
        self
    }
    /// Sets the mask opacity, clamped to `[0.0, 1.0]`.
    pub fn with_opacity(mut self, opacity: f32) -> Self {
        self.opacity = opacity.clamp(0.0, 1.0);
        self
    }
    /// Selects how per-detection colors are chosen.
    pub fn with_color_mode(mut self, mode: ColorMode) -> Self {
        self.color_mode = mode;
        self
    }
    /// Derives the normalized letterbox rectangle from `crop`'s destination
    /// rect relative to the `model_w`×`model_h` model input. When `crop`
    /// has no destination rect, the current letterbox is left untouched.
    pub fn with_letterbox_crop(mut self, crop: &Crop, model_w: usize, model_h: usize) -> Self {
        let (mw, mh) = (model_w as f32, model_h as f32);
        self.letterbox = crop
            .dst_rect
            .map(|r| {
                [
                    r.left as f32 / mw,
                    r.top as f32 / mh,
                    (r.left + r.width) as f32 / mw,
                    (r.top + r.height) as f32 / mh,
                ]
            })
            .or(self.letterbox);
        self
    }
}
/// Maps a bbox expressed in letterboxed-model coordinates back into the
/// un-letterboxed image, clamping to `[0, 1]`.
#[inline]
fn unletter_bbox(bbox: DetectBox, lb: [f32; 4]) -> DetectBox {
    let [lx0, ly0, lx1, ly1] = lb;
    // Degenerate letterbox extents fall back to an identity scale.
    let inv_w = if lx1 > lx0 { (lx1 - lx0).recip() } else { 1.0 };
    let inv_h = if ly1 > ly0 { (ly1 - ly0).recip() } else { 1.0 };
    let unmap_x = |x: f32| ((x - lx0) * inv_w).clamp(0.0, 1.0);
    let unmap_y = |y: f32| ((y - ly0) * inv_h).clamp(0.0, 1.0);
    let b = bbox.bbox.to_canonical();
    DetectBox {
        bbox: edgefirst_decoder::BoundingBox {
            xmin: unmap_x(b.xmin),
            ymin: unmap_y(b.ymin),
            xmax: unmap_x(b.xmax),
            ymax: unmap_y(b.ymax),
        },
        ..bbox
    }
}
/// Source/destination crop rectangles for a conversion, plus an optional
/// fill color; `None` fields mean "use the full frame".
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct Crop {
    /// Region of the source to read; `None` = whole source.
    pub src_rect: Option<Rect>,
    /// Region of the destination to write; `None` = whole destination.
    pub dst_rect: Option<Rect>,
    /// RGBA color for the destination outside `dst_rect`, when set.
    pub dst_color: Option<[u8; 4]>,
}
impl Default for Crop {
fn default() -> Self {
Crop::new()
}
}
impl Crop {
    /// Creates an empty crop: whole source, whole destination, no fill.
    pub fn new() -> Self {
        Crop {
            src_rect: None,
            dst_rect: None,
            dst_color: None,
        }
    }
    /// Sets (or clears) the source rectangle to read from.
    pub fn with_src_rect(mut self, src_rect: Option<Rect>) -> Self {
        self.src_rect = src_rect;
        self
    }
    /// Sets (or clears) the destination rectangle to write into.
    pub fn with_dst_rect(mut self, dst_rect: Option<Rect>) -> Self {
        self.dst_rect = dst_rect;
        self
    }
    /// Sets (or clears) the RGBA fill for the area outside `dst_rect`.
    pub fn with_dst_color(mut self, dst_color: Option<[u8; 4]>) -> Self {
        self.dst_color = dst_color;
        self
    }
    /// Alias for [`Crop::new`]: a crop that copies the full frame.
    pub fn no_crop() -> Self {
        Crop::new()
    }
    /// Returns `true` when `rect` (if present) lies fully inside `w`×`h`.
    /// Uses checked addition so huge `left`/`top` values cannot wrap around
    /// and falsely validate the rect.
    fn rect_fits(rect: &Option<Rect>, w: usize, h: usize) -> bool {
        rect.is_none_or(|r| {
            r.left.checked_add(r.width).is_some_and(|right| right <= w)
                && r.top.checked_add(r.height).is_some_and(|bottom| bottom <= h)
        })
    }
    /// Validates the crop rectangles against the source and destination
    /// dimensions, reporting which rect (or both) is out of bounds.
    pub(crate) fn check_crop_dims(
        &self,
        src_w: usize,
        src_h: usize,
        dst_w: usize,
        dst_h: usize,
    ) -> Result<(), Error> {
        // Fixed: `left + width` previously used unchecked addition, which
        // wraps in release builds and could accept an invalid crop.
        let src_ok = Self::rect_fits(&self.src_rect, src_w, src_h);
        let dst_ok = Self::rect_fits(&self.dst_rect, dst_w, dst_h);
        match (src_ok, dst_ok) {
            (true, true) => Ok(()),
            (true, false) => Err(Error::CropInvalid(format!(
                "Dest crop invalid: {:?}",
                self.dst_rect
            ))),
            (false, true) => Err(Error::CropInvalid(format!(
                "Src crop invalid: {:?}",
                self.src_rect
            ))),
            (false, false) => Err(Error::CropInvalid(format!(
                "Dest and Src crop invalid: {:?} {:?}",
                self.dst_rect, self.src_rect
            ))),
        }
    }
    /// Convenience wrapper: validates against two tensors' image dimensions
    /// (tensors without width/height are treated as 0×0).
    pub fn check_crop_dyn(
        &self,
        src: &edgefirst_tensor::TensorDyn,
        dst: &edgefirst_tensor::TensorDyn,
    ) -> Result<(), Error> {
        self.check_crop_dims(
            src.width().unwrap_or(0),
            src.height().unwrap_or(0),
            dst.width().unwrap_or(0),
            dst.height().unwrap_or(0),
        )
    }
}
/// Axis-aligned rectangle in pixels, anchored at its top-left corner.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct Rect {
    pub left: usize,
    pub top: usize,
    pub width: usize,
    pub height: usize,
}
impl Rect {
    /// Creates a rect from its top-left corner and size (in pixels).
    pub fn new(left: usize, top: usize, width: usize, height: usize) -> Self {
        Self {
            left,
            top,
            width,
            height,
        }
    }
    /// Returns `true` when the rect fits inside `image`'s dimensions
    /// (tensors without width/height are treated as 0×0).
    ///
    /// Fixed: `left + width` previously used unchecked addition, which
    /// wraps in release builds and could falsely validate a huge rect.
    pub fn check_rect_dyn(&self, image: &TensorDyn) -> bool {
        let w = image.width().unwrap_or(0);
        let h = image.height().unwrap_or(0);
        self.left.checked_add(self.width).is_some_and(|right| right <= w)
            && self.top.checked_add(self.height).is_some_and(|bottom| bottom <= h)
    }
}
/// Common interface implemented by each image-processing backend and by the
/// dispatching [`ImageProcessor`] itself.
#[enum_dispatch(ImageProcessor)]
pub trait ImageProcessorTrait {
    /// Converts `src` into `dst` (format/size as defined by `dst`'s
    /// metadata), applying the given rotation, flip, and crop.
    fn convert(
        &mut self,
        src: &TensorDyn,
        dst: &mut TensorDyn,
        rotation: Rotation,
        flip: Flip,
        crop: Crop,
    ) -> Result<()>;
    /// Draws already-materialized segmentation masks for `detect` onto
    /// `dst`, styled per `overlay`.
    fn draw_decoded_masks(
        &mut self,
        dst: &mut TensorDyn,
        detect: &[DetectBox],
        segmentation: &[Segmentation],
        overlay: MaskOverlay<'_>,
    ) -> Result<()>;
    /// Draws masks derived from prototype data (`proto_data`) for `detect`
    /// onto `dst`, styled per `overlay`.
    fn draw_proto_masks(
        &mut self,
        dst: &mut TensorDyn,
        detect: &[DetectBox],
        proto_data: &ProtoData,
        overlay: MaskOverlay<'_>,
    ) -> Result<()>;
    /// Replaces the per-class RGBA color palette used when drawing masks.
    fn set_class_colors(&mut self, colors: &[[u8; 4]]) -> Result<()>;
}
/// Construction options for [`ImageProcessor::with_config`].
#[derive(Debug, Clone, Default)]
pub struct ImageProcessorConfig {
    /// Which EGL display the GL backend should use; `None` lets the backend
    /// choose.
    #[cfg(target_os = "linux")]
    #[cfg(feature = "opengl")]
    pub egl_display: Option<EglDisplayKind>,
    /// Backend selection strategy (defaults to [`ComputeBackend::Auto`]).
    pub backend: ComputeBackend,
}
/// Requested backend strategy; see [`ImageProcessor::with_config`] for how
/// each value is honored (non-CPU choices keep a CPU fallback).
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)]
pub enum ComputeBackend {
    /// Probe and initialize every available backend (default).
    #[default]
    Auto,
    Cpu,
    G2d,
    OpenGl,
}
/// Single backend forced via the `EDGEFIRST_FORCE_BACKEND` env var; when
/// set, no fallback to other backends occurs.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub(crate) enum ForcedBackend {
    Cpu,
    G2d,
    OpenGl,
}
/// Image processor that dispatches work to whichever backends initialized
/// successfully (CPU, G2D, threaded OpenGL), in preference order with CPU
/// as the final fallback — unless a backend is forced.
#[derive(Debug)]
pub struct ImageProcessor {
    pub cpu: Option<CPUProcessor>,
    #[cfg(target_os = "linux")]
    pub g2d: Option<G2DProcessor>,
    #[cfg(target_os = "linux")]
    #[cfg(feature = "opengl")]
    pub opengl: Option<GLProcessorThreaded>,
    // Set only via EDGEFIRST_FORCE_BACKEND; disables fallback chains.
    pub(crate) forced_backend: Option<ForcedBackend>,
}
// SAFETY: NOTE(review) — these blanket impls assert that every backend held
// by `ImageProcessor` (CPU, G2D, threaded GL) may be moved and shared across
// threads. Nothing visible in this file proves that for the handles inside
// the G2D/GL processors; confirm their internal thread-safety before relying
// on concurrent use.
unsafe impl Send for ImageProcessor {}
unsafe impl Sync for ImageProcessor {}
impl ImageProcessor {
/// Builds a processor with the default configuration (auto backend
/// selection).
pub fn new() -> Result<Self> {
    Self::with_config(ImageProcessorConfig::default())
}
/// Builds the processor according to `config`.
///
/// Explicit backends (`Cpu`, `G2d`, `OpenGl`) return early; the non-CPU
/// choices keep a CPU fallback and degrade gracefully when their backend
/// fails to initialize. `Auto` first honors `EDGEFIRST_FORCE_BACKEND`
/// (cpu/g2d/opengl — unknown values or unavailable backends are hard
/// errors), then probes every backend not disabled by
/// `EDGEFIRST_DISABLE_{G2D,GL,CPU}`.
#[allow(unused_variables)] // `config` is unused on non-Linux / non-GL builds
pub fn with_config(config: ImageProcessorConfig) -> Result<Self> {
    match config.backend {
        ComputeBackend::Cpu => {
            log::info!("ComputeBackend::Cpu — CPU only");
            return Ok(Self {
                cpu: Some(CPUProcessor::new()),
                #[cfg(target_os = "linux")]
                g2d: None,
                #[cfg(target_os = "linux")]
                #[cfg(feature = "opengl")]
                opengl: None,
                forced_backend: None,
            });
        }
        ComputeBackend::G2d => {
            log::info!("ComputeBackend::G2d — G2D + CPU fallback");
            #[cfg(target_os = "linux")]
            {
                // Best effort: fall back to CPU-only when G2D init fails.
                let g2d = match G2DProcessor::new() {
                    Ok(g) => Some(g),
                    Err(e) => {
                        log::warn!("G2D requested but failed to initialize: {e:?}");
                        None
                    }
                };
                return Ok(Self {
                    cpu: Some(CPUProcessor::new()),
                    g2d,
                    #[cfg(feature = "opengl")]
                    opengl: None,
                    forced_backend: None,
                });
            }
            #[cfg(not(target_os = "linux"))]
            {
                log::warn!("G2D requested but not available on this platform, using CPU");
                return Ok(Self {
                    cpu: Some(CPUProcessor::new()),
                    forced_backend: None,
                });
            }
        }
        ComputeBackend::OpenGl => {
            log::info!("ComputeBackend::OpenGl — OpenGL + CPU fallback");
            #[cfg(target_os = "linux")]
            {
                // Best effort: fall back to CPU-only when GL init fails.
                #[cfg(feature = "opengl")]
                let opengl = match GLProcessorThreaded::new(config.egl_display) {
                    Ok(gl) => Some(gl),
                    Err(e) => {
                        log::warn!("OpenGL requested but failed to initialize: {e:?}");
                        None
                    }
                };
                return Ok(Self {
                    cpu: Some(CPUProcessor::new()),
                    g2d: None,
                    #[cfg(feature = "opengl")]
                    opengl,
                    forced_backend: None,
                });
            }
            #[cfg(not(target_os = "linux"))]
            {
                log::warn!("OpenGL requested but not available on this platform, using CPU");
                return Ok(Self {
                    cpu: Some(CPUProcessor::new()),
                    forced_backend: None,
                });
            }
        }
        // Auto falls through to the env-var / probing logic below.
        ComputeBackend::Auto => { }
    }
    // EDGEFIRST_FORCE_BACKEND restricts Auto to a single backend and turns
    // its initialization failure into a hard error (no fallback).
    if let Ok(val) = std::env::var("EDGEFIRST_FORCE_BACKEND") {
        let val_lower = val.to_lowercase();
        let forced = match val_lower.as_str() {
            "cpu" => ForcedBackend::Cpu,
            "g2d" => ForcedBackend::G2d,
            "opengl" => ForcedBackend::OpenGl,
            other => {
                return Err(Error::ForcedBackendUnavailable(format!(
                    "unknown EDGEFIRST_FORCE_BACKEND value: {other:?} (expected cpu, g2d, or opengl)"
                )));
            }
        };
        log::info!("EDGEFIRST_FORCE_BACKEND={val} — only initializing {val_lower} backend");
        return match forced {
            ForcedBackend::Cpu => Ok(Self {
                cpu: Some(CPUProcessor::new()),
                #[cfg(target_os = "linux")]
                g2d: None,
                #[cfg(target_os = "linux")]
                #[cfg(feature = "opengl")]
                opengl: None,
                forced_backend: Some(ForcedBackend::Cpu),
            }),
            ForcedBackend::G2d => {
                #[cfg(target_os = "linux")]
                {
                    let g2d = G2DProcessor::new().map_err(|e| {
                        Error::ForcedBackendUnavailable(format!(
                            "g2d forced but failed to initialize: {e:?}"
                        ))
                    })?;
                    Ok(Self {
                        cpu: None,
                        g2d: Some(g2d),
                        #[cfg(feature = "opengl")]
                        opengl: None,
                        forced_backend: Some(ForcedBackend::G2d),
                    })
                }
                #[cfg(not(target_os = "linux"))]
                {
                    Err(Error::ForcedBackendUnavailable(
                        "g2d backend is only available on Linux".into(),
                    ))
                }
            }
            ForcedBackend::OpenGl => {
                #[cfg(target_os = "linux")]
                #[cfg(feature = "opengl")]
                {
                    let opengl = GLProcessorThreaded::new(config.egl_display).map_err(|e| {
                        Error::ForcedBackendUnavailable(format!(
                            "opengl forced but failed to initialize: {e:?}"
                        ))
                    })?;
                    Ok(Self {
                        cpu: None,
                        g2d: None,
                        opengl: Some(opengl),
                        forced_backend: Some(ForcedBackend::OpenGl),
                    })
                }
                #[cfg(not(all(target_os = "linux", feature = "opengl")))]
                {
                    Err(Error::ForcedBackendUnavailable(
                        "opengl backend requires Linux with the 'opengl' feature enabled"
                            .into(),
                    ))
                }
            }
        };
    }
    // Auto: probe each backend unless disabled by env var; init failures
    // degrade to `None` rather than erroring.
    #[cfg(target_os = "linux")]
    let g2d = if std::env::var("EDGEFIRST_DISABLE_G2D")
        .map(|x| x != "0" && x.to_lowercase() != "false")
        .unwrap_or(false)
    {
        log::debug!("EDGEFIRST_DISABLE_G2D is set");
        None
    } else {
        match G2DProcessor::new() {
            Ok(g2d_converter) => Some(g2d_converter),
            Err(err) => {
                log::warn!("Failed to initialize G2D converter: {err:?}");
                None
            }
        }
    };
    #[cfg(target_os = "linux")]
    #[cfg(feature = "opengl")]
    let opengl = if std::env::var("EDGEFIRST_DISABLE_GL")
        .map(|x| x != "0" && x.to_lowercase() != "false")
        .unwrap_or(false)
    {
        log::debug!("EDGEFIRST_DISABLE_GL is set");
        None
    } else {
        match GLProcessorThreaded::new(config.egl_display) {
            Ok(gl_converter) => Some(gl_converter),
            Err(err) => {
                log::warn!("Failed to initialize GL converter: {err:?}");
                None
            }
        }
    };
    let cpu = if std::env::var("EDGEFIRST_DISABLE_CPU")
        .map(|x| x != "0" && x.to_lowercase() != "false")
        .unwrap_or(false)
    {
        log::debug!("EDGEFIRST_DISABLE_CPU is set");
        None
    } else {
        Some(CPUProcessor::new())
    };
    Ok(Self {
        cpu,
        #[cfg(target_os = "linux")]
        g2d,
        #[cfg(target_os = "linux")]
        #[cfg(feature = "opengl")]
        opengl,
        forced_backend: None,
    })
}
/// Sets the int8 interpolation mode on the GL backend; silently succeeds
/// when no GL backend was initialized.
#[cfg(target_os = "linux")]
#[cfg(feature = "opengl")]
pub fn set_int8_interpolation_mode(&mut self, mode: Int8InterpolationMode) -> Result<()> {
    match self.opengl.as_mut() {
        Some(gl) => gl.set_int8_interpolation_mode(mode),
        None => Ok(()),
    }
}
/// Allocates an image tensor, preferring GPU-friendly storage.
///
/// Priority: an explicit `memory` request (DMA requests get a GPU-aligned
/// row stride when padding is needed), otherwise DMA (skipped when the GL
/// backend transfers via PBO), then a GL PBO image for 1-byte dtypes, and
/// finally plain `Mem`.
pub fn create_image(
    &self,
    width: usize,
    height: usize,
    format: PixelFormat,
    dtype: DType,
    memory: Option<TensorMemory>,
) -> Result<TensorDyn> {
    // Row stride (bytes) rounded up to the GPU pitch alignment, when
    // computable for this format/dtype.
    #[cfg(target_os = "linux")]
    let dma_stride_bytes: Option<usize> = primary_plane_bpp(format, dtype.size())
        .and_then(|bpp| width.checked_mul(bpp))
        .and_then(align_pitch_bytes_to_gpu_alignment);
    #[cfg(target_os = "linux")]
    let try_dma = || -> Result<TensorDyn> {
        let packed = format.layout() == edgefirst_tensor::PixelLayout::Packed;
        match dma_stride_bytes {
            // Only packed formats get an explicit padded stride, and only
            // when the aligned pitch actually exceeds the natural one.
            Some(stride)
                if packed
                    && primary_plane_bpp(format, dtype.size())
                        .and_then(|bpp| width.checked_mul(bpp))
                        .is_some_and(|natural| stride > natural) =>
            {
                log::debug!(
                    "create_image: padding row stride for {format:?} {width}x{height} \
                    from natural pitch to {stride} bytes for GPU alignment"
                );
                Ok(TensorDyn::image_with_stride(
                    width,
                    height,
                    format,
                    dtype,
                    stride,
                    Some(edgefirst_tensor::TensorMemory::Dma),
                )?)
            }
            _ => Ok(TensorDyn::image(
                width,
                height,
                format,
                dtype,
                Some(edgefirst_tensor::TensorMemory::Dma),
            )?),
        }
    };
    match memory {
        #[cfg(target_os = "linux")]
        Some(TensorMemory::Dma) => {
            return try_dma();
        }
        Some(mem) => {
            return Ok(TensorDyn::image(width, height, format, dtype, Some(mem))?);
        }
        None => {}
    }
    // No explicit request: prefer DMA unless the GL backend would copy
    // through a PBO anyway (in which case DMA buys nothing).
    #[cfg(target_os = "linux")]
    {
        #[cfg(feature = "opengl")]
        let gl_uses_pbo = self
            .opengl
            .as_ref()
            .is_some_and(|gl| gl.transfer_backend() == opengl_headless::TransferBackend::Pbo);
        #[cfg(not(feature = "opengl"))]
        let gl_uses_pbo = false;
        if !gl_uses_pbo {
            if let Ok(img) = try_dma() {
                return Ok(img);
            }
        }
    }
    // PBO path for 1-byte dtypes; i8 reuses the u8 tensor via transmute.
    #[cfg(target_os = "linux")]
    #[cfg(feature = "opengl")]
    if dtype.size() == 1 {
        if let Some(gl) = &self.opengl {
            match gl.create_pbo_image(width, height, format) {
                Ok(t) => {
                    if dtype == DType::I8 {
                        debug_assert!(
                            t.chroma().is_none(),
                            "PBO i8 transmute requires chroma == None"
                        );
                        // SAFETY: NOTE(review) — relies on Tensor<u8> and
                        // Tensor<i8> sharing layout (statically asserted in
                        // import_image); confirm this stays in sync.
                        let t_i8: Tensor<i8> = unsafe { std::mem::transmute(t) };
                        return Ok(TensorDyn::from(t_i8));
                    }
                    return Ok(TensorDyn::from(t));
                }
                Err(e) => log::debug!("PBO image creation failed, falling back to Mem: {e:?}"),
            }
        }
    }
    // Last resort: plain CPU memory.
    Ok(TensorDyn::image(
        width,
        height,
        format,
        dtype,
        Some(edgefirst_tensor::TensorMemory::Mem),
    )?)
}
/// Wraps externally-allocated DMA buffer fd(s) as an image tensor without
/// copying.
///
/// With a `chroma` descriptor, builds a two-plane semi-planar tensor
/// (NV12 only) from separate luma/chroma fds; otherwise wraps a single
/// contiguous fd. Strides/offsets from the descriptors are applied when
/// present. Errors when an fd is not DMA-backed or the format/layout is
/// unsupported.
#[cfg(target_os = "linux")]
pub fn import_image(
    &self,
    image: edgefirst_tensor::PlaneDescriptor,
    chroma: Option<edgefirst_tensor::PlaneDescriptor>,
    width: usize,
    height: usize,
    format: PixelFormat,
    dtype: DType,
) -> Result<TensorDyn> {
    use edgefirst_tensor::{Tensor, TensorMemory};
    // Capture layout overrides before the descriptors are consumed below.
    let image_stride = image.stride();
    let image_offset = image.offset();
    let chroma_stride = chroma.as_ref().and_then(|c| c.stride());
    let chroma_offset = chroma.as_ref().and_then(|c| c.offset());
    if let Some(chroma_pd) = chroma {
        // Multiplane path: separate luma + chroma fds.
        if dtype != DType::U8 && dtype != DType::I8 {
            return Err(Error::NotSupported(format!(
                "multiplane import only supports U8/I8, got {dtype:?}"
            )));
        }
        if format.layout() != PixelLayout::SemiPlanar {
            return Err(Error::NotSupported(format!(
                "import_image with chroma requires a semi-planar format, got {format:?}"
            )));
        }
        let chroma_h = match format {
            PixelFormat::Nv12 => {
                if !height.is_multiple_of(2) {
                    return Err(Error::InvalidShape(format!(
                        "NV12 requires even height, got {height}"
                    )));
                }
                // NV12's chroma plane is vertically subsampled 2:1.
                height / 2
            }
            PixelFormat::Nv16 => {
                return Err(Error::NotSupported(
                    "multiplane NV16 is not yet supported; use contiguous NV16 instead".into(),
                ))
            }
            _ => {
                return Err(Error::NotSupported(format!(
                    "unsupported semi-planar format: {format:?}"
                )))
            }
        };
        let luma = Tensor::<u8>::from_fd(image.into_fd(), &[height, width], Some("luma"))?;
        if luma.memory() != TensorMemory::Dma {
            return Err(Error::NotSupported(format!(
                "luma fd must be DMA-backed, got {:?}",
                luma.memory()
            )));
        }
        let chroma_tensor =
            Tensor::<u8>::from_fd(chroma_pd.into_fd(), &[chroma_h, width], Some("chroma"))?;
        if chroma_tensor.memory() != TensorMemory::Dma {
            return Err(Error::NotSupported(format!(
                "chroma fd must be DMA-backed, got {:?}",
                chroma_tensor.memory()
            )));
        }
        let mut tensor = Tensor::<u8>::from_planes(luma, chroma_tensor, format)?;
        if let Some(s) = image_stride {
            tensor.set_row_stride(s)?;
        }
        if let Some(o) = image_offset {
            tensor.set_plane_offset(o);
        }
        // Apply chroma-plane overrides; stride must cover at least `width`
        // bytes per row.
        if let Some(chroma_ref) = tensor.chroma_mut() {
            if let Some(s) = chroma_stride {
                if s < width {
                    return Err(Error::InvalidShape(format!(
                        "chroma stride {s} < minimum {width} for {format:?}"
                    )));
                }
                chroma_ref.set_row_stride_unchecked(s);
            }
            if let Some(o) = chroma_offset {
                chroma_ref.set_plane_offset(o);
            }
        }
        if dtype == DType::I8 {
            // Compile-time check that the transmute below is layout-sound
            // (size and alignment of Tensor<u8> and Tensor<i8> match).
            const {
                assert!(std::mem::size_of::<Tensor<u8>>() == std::mem::size_of::<Tensor<i8>>());
                assert!(
                    std::mem::align_of::<Tensor<u8>>() == std::mem::align_of::<Tensor<i8>>()
                );
            }
            // SAFETY: size/alignment asserted equal above; u8 and i8 are
            // layout-compatible element types.
            let tensor_i8: Tensor<i8> = unsafe { std::mem::transmute(tensor) };
            return Ok(TensorDyn::from(tensor_i8));
        }
        Ok(TensorDyn::from(tensor))
    } else {
        // Single-fd path: derive the tensor shape from the pixel layout.
        let shape = match format.layout() {
            PixelLayout::Packed => vec![height, width, format.channels()],
            PixelLayout::Planar => vec![format.channels(), height, width],
            PixelLayout::SemiPlanar => {
                // Luma + chroma stacked vertically in one buffer.
                let total_h = match format {
                    PixelFormat::Nv12 => {
                        if !height.is_multiple_of(2) {
                            return Err(Error::InvalidShape(format!(
                                "NV12 requires even height, got {height}"
                            )));
                        }
                        height * 3 / 2
                    }
                    PixelFormat::Nv16 => height * 2,
                    _ => {
                        return Err(Error::InvalidShape(format!(
                            "unknown semi-planar height multiplier for {format:?}"
                        )))
                    }
                };
                vec![total_h, width]
            }
            _ => {
                return Err(Error::NotSupported(format!(
                    "unsupported pixel layout for import_image: {:?}",
                    format.layout()
                )));
            }
        };
        let tensor = TensorDyn::from_fd(image.into_fd(), &shape, dtype, None)?;
        if tensor.memory() != TensorMemory::Dma {
            return Err(Error::NotSupported(format!(
                "import_image requires DMA-backed fd, got {:?}",
                tensor.memory()
            )));
        }
        let mut tensor = tensor.with_format(format)?;
        if let Some(s) = image_stride {
            tensor.set_row_stride(s)?;
        }
        if let Some(o) = image_offset {
            tensor.set_plane_offset(o);
        }
        Ok(tensor)
    }
}
pub fn draw_masks(
&mut self,
decoder: &edgefirst_decoder::Decoder,
outputs: &[&TensorDyn],
dst: &mut TensorDyn,
overlay: MaskOverlay<'_>,
) -> Result<Vec<DetectBox>> {
let mut output_boxes = Vec::with_capacity(100);
let proto_result = decoder
.decode_proto(outputs, &mut output_boxes)
.map_err(|e| Error::Internal(format!("decode_proto: {e:#?}")))?;
if let Some(proto_data) = proto_result {
self.draw_proto_masks(dst, &output_boxes, &proto_data, overlay)?;
} else {
let mut output_masks = Vec::with_capacity(100);
decoder
.decode(outputs, &mut output_boxes, &mut output_masks)
.map_err(|e| Error::Internal(format!("decode: {e:#?}")))?;
self.draw_decoded_masks(dst, &output_boxes, &output_masks, overlay)?;
}
Ok(output_boxes)
}
/// Like [`ImageProcessor::draw_masks`], but runs the decoder's tracked
/// variants so detections carry track associations; returns both the boxes
/// and the tracker's track info.
#[cfg(feature = "tracker")]
pub fn draw_masks_tracked<TR: edgefirst_tracker::Tracker<DetectBox>>(
    &mut self,
    decoder: &edgefirst_decoder::Decoder,
    tracker: &mut TR,
    timestamp: u64,
    outputs: &[&TensorDyn],
    dst: &mut TensorDyn,
    overlay: MaskOverlay<'_>,
) -> Result<(Vec<DetectBox>, Vec<edgefirst_tracker::TrackInfo>)> {
    let mut output_boxes = Vec::with_capacity(100);
    let mut output_tracks = Vec::new();
    let proto_result = decoder
        .decode_proto_tracked(
            tracker,
            timestamp,
            outputs,
            &mut output_boxes,
            &mut output_tracks,
        )
        .map_err(|e| Error::Internal(format!("decode_proto_tracked: {e:#?}")))?;
    // Proto-mask path when prototype data is present, otherwise fully
    // decoded masks — mirrors `draw_masks`.
    if let Some(proto_data) = proto_result {
        self.draw_proto_masks(dst, &output_boxes, &proto_data, overlay)?;
    } else {
        let mut output_masks = Vec::with_capacity(100);
        decoder
            .decode_tracked(
                tracker,
                timestamp,
                outputs,
                &mut output_boxes,
                &mut output_masks,
                &mut output_tracks,
            )
            .map_err(|e| Error::Internal(format!("decode_tracked: {e:#?}")))?;
        self.draw_decoded_masks(dst, &output_boxes, &output_masks, overlay)?;
    }
    Ok((output_boxes, output_tracks))
}
pub fn materialize_masks(
&mut self,
detect: &[DetectBox],
proto_data: &ProtoData,
letterbox: Option<[f32; 4]>,
resolution: MaskResolution,
) -> Result<Vec<Segmentation>> {
let cpu = self.cpu.as_mut().ok_or(Error::NoConverter)?;
match resolution {
MaskResolution::Proto => cpu.materialize_segmentations(detect, proto_data, letterbox),
MaskResolution::Scaled { width, height } => {
cpu.materialize_scaled_segmentations(detect, proto_data, letterbox, width, height)
}
}
}
}
impl ImageProcessorTrait for ImageProcessor {
/// Converts `src` into `dst` with rotation/flip/crop, dispatching to the
/// forced backend when set, otherwise trying OpenGL → G2D → CPU in order;
/// a declining backend falls through to the next.
fn convert(
    &mut self,
    src: &TensorDyn,
    dst: &mut TensorDyn,
    rotation: Rotation,
    flip: Flip,
    crop: Crop,
) -> Result<()> {
    let start = Instant::now();
    let src_fmt = src.format();
    let dst_fmt = dst.format();
    log::trace!(
        "convert: {src_fmt:?}({:?}/{:?}) → {dst_fmt:?}({:?}/{:?}), \
        rotation={rotation:?}, flip={flip:?}, backend={:?}",
        src.dtype(),
        src.memory(),
        dst.dtype(),
        dst.memory(),
        self.forced_backend,
    );
    // Forced backend: use only that backend; error when it is unavailable.
    if let Some(forced) = self.forced_backend {
        return match forced {
            ForcedBackend::Cpu => {
                if let Some(cpu) = self.cpu.as_mut() {
                    let r = cpu.convert(src, dst, rotation, flip, crop);
                    log::trace!(
                        "convert: forced=cpu result={} ({:?})",
                        if r.is_ok() { "ok" } else { "err" },
                        start.elapsed()
                    );
                    return r;
                }
                Err(Error::ForcedBackendUnavailable("cpu".into()))
            }
            ForcedBackend::G2d => {
                #[cfg(target_os = "linux")]
                if let Some(g2d) = self.g2d.as_mut() {
                    let r = g2d.convert(src, dst, rotation, flip, crop);
                    log::trace!(
                        "convert: forced=g2d result={} ({:?})",
                        if r.is_ok() { "ok" } else { "err" },
                        start.elapsed()
                    );
                    return r;
                }
                Err(Error::ForcedBackendUnavailable("g2d".into()))
            }
            ForcedBackend::OpenGl => {
                #[cfg(target_os = "linux")]
                #[cfg(feature = "opengl")]
                if let Some(opengl) = self.opengl.as_mut() {
                    let r = opengl.convert(src, dst, rotation, flip, crop);
                    log::trace!(
                        "convert: forced=opengl result={} ({:?})",
                        if r.is_ok() { "ok" } else { "err" },
                        start.elapsed()
                    );
                    return r;
                }
                Err(Error::ForcedBackendUnavailable("opengl".into()))
            }
        };
    }
    // Auto: OpenGL first (when built and initialized)…
    #[cfg(target_os = "linux")]
    #[cfg(feature = "opengl")]
    if let Some(opengl) = self.opengl.as_mut() {
        match opengl.convert(src, dst, rotation, flip, crop) {
            Ok(_) => {
                log::trace!(
                    "convert: auto selected=opengl for {src_fmt:?}→{dst_fmt:?} ({:?})",
                    start.elapsed()
                );
                return Ok(());
            }
            Err(e) => {
                log::trace!("convert: auto opengl declined {src_fmt:?}→{dst_fmt:?}: {e}");
            }
        }
    }
    // …then G2D…
    #[cfg(target_os = "linux")]
    if let Some(g2d) = self.g2d.as_mut() {
        match g2d.convert(src, dst, rotation, flip, crop) {
            Ok(_) => {
                log::trace!(
                    "convert: auto selected=g2d for {src_fmt:?}→{dst_fmt:?} ({:?})",
                    start.elapsed()
                );
                return Ok(());
            }
            Err(e) => {
                log::trace!("convert: auto g2d declined {src_fmt:?}→{dst_fmt:?}: {e}");
            }
        }
    }
    // …finally CPU; its failure is the final error.
    if let Some(cpu) = self.cpu.as_mut() {
        match cpu.convert(src, dst, rotation, flip, crop) {
            Ok(_) => {
                log::trace!(
                    "convert: auto selected=cpu for {src_fmt:?}→{dst_fmt:?} ({:?})",
                    start.elapsed()
                );
                return Ok(());
            }
            Err(e) => {
                log::trace!("convert: auto cpu failed {src_fmt:?}→{dst_fmt:?}: {e}");
                return Err(e);
            }
        }
    }
    Err(Error::NoConverter)
}
/// Draws materialized segmentation masks onto `dst`, handling letterbox
/// unmapping and backend fallback (forced backend, else G2D for empty
/// frames, then OpenGL, then CPU).
fn draw_decoded_masks(
    &mut self,
    dst: &mut TensorDyn,
    detect: &[DetectBox],
    segmentation: &[Segmentation],
    overlay: MaskOverlay<'_>,
) -> Result<()> {
    let start = Instant::now();
    // The background is composited while writing into dst, so the two
    // buffers must be distinct.
    if let Some(bg) = overlay.background {
        if bg.aliases(dst) {
            return Err(Error::AliasedBuffers(
                "background must not reference the same buffer as dst".to_string(),
            ));
        }
    }
    // With a letterbox region, unmap the boxes back into full-image
    // coordinates and mirror the new extents onto the segmentations — but
    // only when the two slices pair up 1:1; otherwise segmentations pass
    // through unchanged.
    let lb_boxes: Vec<DetectBox>;
    let lb_segs: Vec<Segmentation>;
    let (detect, segmentation) = if let Some(lb) = overlay.letterbox {
        lb_boxes = detect.iter().map(|&d| unletter_bbox(d, lb)).collect();
        lb_segs = if segmentation.len() == lb_boxes.len() {
            segmentation
                .iter()
                .zip(lb_boxes.iter())
                .map(|(s, d)| Segmentation {
                    xmin: d.bbox.xmin,
                    ymin: d.bbox.ymin,
                    xmax: d.bbox.xmax,
                    ymax: d.bbox.ymax,
                    segmentation: s.segmentation.clone(),
                })
                .collect()
        } else {
            segmentation.to_vec()
        };
        (lb_boxes.as_slice(), lb_segs.as_slice())
    } else {
        (detect, segmentation)
    };
    #[cfg(target_os = "linux")]
    let is_empty_frame = detect.is_empty() && segmentation.is_empty();
    // Forced backend: use only that backend; error when it is unavailable.
    if let Some(forced) = self.forced_backend {
        return match forced {
            ForcedBackend::Cpu => {
                if let Some(cpu) = self.cpu.as_mut() {
                    return cpu.draw_decoded_masks(dst, detect, segmentation, overlay);
                }
                Err(Error::ForcedBackendUnavailable("cpu".into()))
            }
            ForcedBackend::G2d => {
                #[cfg(target_os = "linux")]
                if let Some(g2d) = self.g2d.as_mut() {
                    return g2d.draw_decoded_masks(dst, detect, segmentation, overlay);
                }
                Err(Error::ForcedBackendUnavailable("g2d".into()))
            }
            ForcedBackend::OpenGl => {
                #[cfg(target_os = "linux")]
                #[cfg(feature = "opengl")]
                if let Some(opengl) = self.opengl.as_mut() {
                    return opengl.draw_decoded_masks(dst, detect, segmentation, overlay);
                }
                Err(Error::ForcedBackendUnavailable("opengl".into()))
            }
        };
    }
    // Empty frames (nothing to draw beyond the background) go to G2D first.
    #[cfg(target_os = "linux")]
    if is_empty_frame {
        if let Some(g2d) = self.g2d.as_mut() {
            match g2d.draw_decoded_masks(dst, detect, segmentation, overlay) {
                Ok(_) => {
                    log::trace!(
                        "draw_decoded_masks empty frame via g2d in {:?}",
                        start.elapsed()
                    );
                    return Ok(());
                }
                Err(e) => log::trace!("g2d empty-frame path unavailable: {e:?}"),
            }
        }
    }
    // Preferred backend: OpenGL, with CPU as fallback below.
    #[cfg(target_os = "linux")]
    #[cfg(feature = "opengl")]
    if let Some(opengl) = self.opengl.as_mut() {
        log::trace!(
            "draw_decoded_masks started with opengl in {:?}",
            start.elapsed()
        );
        match opengl.draw_decoded_masks(dst, detect, segmentation, overlay) {
            Ok(_) => {
                log::trace!("draw_decoded_masks with opengl in {:?}", start.elapsed());
                return Ok(());
            }
            Err(e) => {
                log::trace!("draw_decoded_masks didn't work with opengl: {e:?}")
            }
        }
    }
    log::trace!(
        "draw_decoded_masks started with cpu in {:?}",
        start.elapsed()
    );
    if let Some(cpu) = self.cpu.as_mut() {
        match cpu.draw_decoded_masks(dst, detect, segmentation, overlay) {
            Ok(_) => {
                log::trace!("draw_decoded_masks with cpu in {:?}", start.elapsed());
                return Ok(());
            }
            Err(e) => {
                log::trace!("draw_decoded_masks didn't work with cpu: {e:?}");
                return Err(e);
            }
        }
    }
    Err(Error::NoConverter)
}
/// Draws proto-coefficient masks onto `dst`: forced backend when set, G2D
/// for empty frames, a hybrid CPU-materialize + GL-draw path when both
/// backends exist, and a pure-CPU fallback otherwise.
fn draw_proto_masks(
    &mut self,
    dst: &mut TensorDyn,
    detect: &[DetectBox],
    proto_data: &ProtoData,
    overlay: MaskOverlay<'_>,
) -> Result<()> {
    let start = Instant::now();
    // The background is composited while writing into dst, so the two
    // buffers must be distinct.
    if let Some(bg) = overlay.background {
        if bg.aliases(dst) {
            return Err(Error::AliasedBuffers(
                "background must not reference the same buffer as dst".to_string(),
            ));
        }
    }
    // Boxes used for rendering are unmapped out of the letterbox region;
    // the original `detect` is kept for mask materialization below.
    let lb_boxes: Vec<DetectBox>;
    let render_detect = if let Some(lb) = overlay.letterbox {
        lb_boxes = detect.iter().map(|&d| unletter_bbox(d, lb)).collect();
        lb_boxes.as_slice()
    } else {
        detect
    };
    #[cfg(target_os = "linux")]
    let is_empty_frame = detect.is_empty();
    // Forced backend: use only that backend; error when it is unavailable.
    if let Some(forced) = self.forced_backend {
        return match forced {
            ForcedBackend::Cpu => {
                if let Some(cpu) = self.cpu.as_mut() {
                    return cpu.draw_proto_masks(dst, render_detect, proto_data, overlay);
                }
                Err(Error::ForcedBackendUnavailable("cpu".into()))
            }
            ForcedBackend::G2d => {
                #[cfg(target_os = "linux")]
                if let Some(g2d) = self.g2d.as_mut() {
                    return g2d.draw_proto_masks(dst, render_detect, proto_data, overlay);
                }
                Err(Error::ForcedBackendUnavailable("g2d".into()))
            }
            ForcedBackend::OpenGl => {
                #[cfg(target_os = "linux")]
                #[cfg(feature = "opengl")]
                if let Some(opengl) = self.opengl.as_mut() {
                    return opengl.draw_proto_masks(dst, render_detect, proto_data, overlay);
                }
                Err(Error::ForcedBackendUnavailable("opengl".into()))
            }
        };
    }
    // Empty frames (no detections) go to G2D first when available.
    #[cfg(target_os = "linux")]
    if is_empty_frame {
        if let Some(g2d) = self.g2d.as_mut() {
            match g2d.draw_proto_masks(dst, render_detect, proto_data, overlay) {
                Ok(_) => {
                    log::trace!(
                        "draw_proto_masks empty frame via g2d in {:?}",
                        start.elapsed()
                    );
                    return Ok(());
                }
                Err(e) => log::trace!("g2d empty-frame path unavailable: {e:?}"),
            }
        }
    }
    // Hybrid path: materialize masks on the CPU (from the original boxes,
    // with letterbox handled inside materialize_segmentations), then draw
    // the materialized masks with the unmapped boxes on the GL backend.
    #[cfg(target_os = "linux")]
    #[cfg(feature = "opengl")]
    if let (Some(_), Some(_)) = (self.cpu.as_ref(), self.opengl.as_ref()) {
        let segmentation = match self.cpu.as_mut() {
            Some(cpu) => {
                log::trace!(
                    "draw_proto_masks started with hybrid (cpu+opengl) in {:?}",
                    start.elapsed()
                );
                cpu.materialize_segmentations(detect, proto_data, overlay.letterbox)?
            }
            None => unreachable!("cpu presence checked above"),
        };
        if let Some(opengl) = self.opengl.as_mut() {
            match opengl.draw_decoded_masks(dst, render_detect, &segmentation, overlay) {
                Ok(_) => {
                    log::trace!(
                        "draw_proto_masks with hybrid (cpu+opengl) in {:?}",
                        start.elapsed()
                    );
                    return Ok(());
                }
                Err(e) => {
                    log::trace!(
                        "draw_proto_masks hybrid path failed, falling back to cpu: {e:?}"
                    );
                }
            }
        }
    }
    let Some(cpu) = self.cpu.as_mut() else {
        return Err(Error::Internal(
            "draw_proto_masks requires CPU backend for fallback path".into(),
        ));
    };
    log::trace!("draw_proto_masks started with cpu in {:?}", start.elapsed());
    cpu.draw_proto_masks(dst, render_detect, proto_data, overlay)
}
/// Push the per-class overlay palette to the available backends.
///
/// With a forced backend, only that backend is attempted and its absence is
/// an error (g2d has no palette support at all). Otherwise the colors are set
/// on OpenGL when present, falling back to the CPU backend, and
/// `Error::NoConverter` is returned when no backend exists.
fn set_class_colors(&mut self, colors: &[[u8; 4]]) -> Result<()> {
    let start = Instant::now();
    if let Some(forced) = self.forced_backend {
        return match forced {
            ForcedBackend::Cpu => {
                if let Some(cpu) = self.cpu.as_mut() {
                    return cpu.set_class_colors(colors);
                }
                Err(Error::ForcedBackendUnavailable("cpu".into()))
            }
            ForcedBackend::G2d => Err(Error::NotSupported(
                "g2d does not support set_class_colors".into(),
            )),
            ForcedBackend::OpenGl => {
                #[cfg(target_os = "linux")]
                #[cfg(feature = "opengl")]
                if let Some(opengl) = self.opengl.as_mut() {
                    return opengl.set_class_colors(colors);
                }
                Err(Error::ForcedBackendUnavailable("opengl".into()))
            }
        };
    }
    #[cfg(target_os = "linux")]
    #[cfg(feature = "opengl")]
    if let Some(opengl) = self.opengl.as_mut() {
        // Fixed: these traces previously said "image started", a copy-paste
        // from the convert path; they now name the actual operation.
        log::trace!(
            "set_class_colors started with opengl in {:?}",
            start.elapsed()
        );
        match opengl.set_class_colors(colors) {
            Ok(_) => {
                log::trace!("colors set with opengl in {:?}", start.elapsed());
                return Ok(());
            }
            Err(e) => {
                log::trace!("colors didn't set with opengl: {e:?}")
            }
        }
    }
    log::trace!("set_class_colors started with cpu in {:?}", start.elapsed());
    if let Some(cpu) = self.cpu.as_mut() {
        match cpu.set_class_colors(colors) {
            Ok(_) => {
                log::trace!("colors set with cpu in {:?}", start.elapsed());
                return Ok(());
            }
            Err(e) => {
                log::trace!("colors didn't set with cpu: {e:?}");
                return Err(e);
            }
        }
    }
    Err(Error::NoConverter)
}
}
/// Map an EXIF `Orientation` tag onto the (rotation, flip) pair that undoes
/// it. Any parse failure, missing tag, or out-of-range value degrades to the
/// identity transform; only recognizably broken values (outside 1..=8) are
/// logged.
fn read_exif_orientation(exif_bytes: &[u8]) -> (Rotation, Flip) {
    let reader = exif::Reader::new();
    let parsed = match reader.read_raw(exif_bytes.to_vec()) {
        Ok(p) => p,
        Err(_) => return (Rotation::None, Flip::None),
    };
    let orientation = parsed
        .get_field(exif::Tag::Orientation, exif::In::PRIMARY)
        .and_then(|field| field.value.get_uint(0));
    // EXIF orientation values 1-8 per the TIFF/EXIF specification.
    match orientation {
        Some(2) => (Rotation::None, Flip::Horizontal),
        Some(3) => (Rotation::Rotate180, Flip::None),
        Some(4) => (Rotation::Rotate180, Flip::Horizontal),
        Some(5) => (Rotation::Clockwise90, Flip::Horizontal),
        Some(6) => (Rotation::Clockwise90, Flip::None),
        Some(7) => (Rotation::CounterClockwise90, Flip::Horizontal),
        Some(8) => (Rotation::CounterClockwise90, Flip::None),
        Some(1) | None => (Rotation::None, Flip::None),
        Some(v) => {
            log::warn!("broken orientation EXIF value: {v}");
            (Rotation::None, Flip::None)
        }
    }
}
/// Map the packed pixel formats the decoders can emit onto their zune
/// `ColorSpace` equivalent; all other formats are unsupported.
fn pixelfmt_to_colorspace(fmt: PixelFormat) -> Option<ColorSpace> {
    let cs = match fmt {
        PixelFormat::Rgb => ColorSpace::RGB,
        PixelFormat::Rgba => ColorSpace::RGBA,
        PixelFormat::Grey => ColorSpace::Luma,
        _ => return None,
    };
    Some(cs)
}
/// Inverse of `pixelfmt_to_colorspace`: map a zune `ColorSpace` back onto the
/// matching packed `PixelFormat`, or `None` for anything unsupported.
fn colorspace_to_pixelfmt(cs: ColorSpace) -> Option<PixelFormat> {
    let fmt = match cs {
        ColorSpace::RGB => PixelFormat::Rgb,
        ColorSpace::RGBA => PixelFormat::Rgba,
        ColorSpace::Luma => PixelFormat::Grey,
        _ => return None,
    };
    Some(fmt)
}
/// Decode a JPEG byte stream into a tensor image.
///
/// `format` selects the output pixel format (default RGB); `memory` selects
/// the tensor backing store. EXIF orientation is honored by rotating/flipping
/// after decode. On linux, when the destination would be a DMA buffer that
/// needs a GPU-aligned row pitch, the image is decoded into packed system
/// memory and then row-copied into a stride-padded DMA tensor.
fn load_jpeg(
    image: &[u8],
    format: Option<PixelFormat>,
    memory: Option<TensorMemory>,
) -> Result<TensorDyn> {
    // Ask the decoder to emit the requested colorspace directly when possible.
    let colour = match format {
        Some(f) => pixelfmt_to_colorspace(f)
            .ok_or_else(|| Error::NotSupported(format!("Unsupported image format {f:?}")))?,
        None => ColorSpace::RGB,
    };
    let options = DecoderOptions::default().jpeg_set_out_colorspace(colour);
    let mut decoder = JpegDecoder::new_with_options(image, options);
    decoder.decode_headers()?;
    let image_info = decoder.info().ok_or(Error::Internal(
        "JPEG did not return decoded image info".to_string(),
    ))?;
    // The decoder may not honor the requested colorspace (e.g. grey sources);
    // track what it will actually produce.
    let converted_cs = decoder
        .get_output_colorspace()
        .ok_or(Error::Internal("No output colorspace".to_string()))?;
    let converted_fmt = colorspace_to_pixelfmt(converted_cs).ok_or(Error::NotSupported(
        "Unsupported JPEG decoder output".to_string(),
    ))?;
    let dest_fmt = format.unwrap_or(converted_fmt);
    // EXIF orientation, if present, determines a post-decode rotate/flip.
    let (rotation, flip) = decoder
        .exif()
        .map(|x| read_exif_orientation(x))
        .unwrap_or((Rotation::None, Flip::None));
    let w = image_info.width as usize;
    let h = image_info.height as usize;
    if (rotation, flip) == (Rotation::None, Flip::None) {
        // Linux-only: padded_dma_pitch_for returning Some means the caller
        // wants (or defaults to) a DMA tensor whose pitch must be aligned.
        #[cfg(target_os = "linux")]
        if let Some(aligned_pitch) = padded_dma_pitch_for(dest_fmt, w, &memory) {
            // Decode packed, convert if the decoder output differs from the
            // requested format, then row-copy into the padded DMA tensor.
            let staging = Tensor::<u8>::image(w, h, converted_fmt, Some(TensorMemory::Mem))?;
            decoder.decode_into(&mut staging.map()?)?;
            let packed = if converted_fmt != dest_fmt {
                let mut tmp = Tensor::<u8>::image(w, h, dest_fmt, Some(TensorMemory::Mem))?;
                CPUProcessor::convert_format_pf(&staging, &mut tmp, converted_fmt, dest_fmt)?;
                tmp
            } else {
                staging
            };
            let mut dma = Tensor::<u8>::image_with_stride(
                w,
                h,
                dest_fmt,
                aligned_pitch,
                Some(TensorMemory::Dma),
            )?;
            copy_packed_to_padded_dma(&packed, &mut dma)?;
            return Ok(TensorDyn::from(dma));
        }
        // Unpadded path: decode directly into the destination when formats
        // match, otherwise decode into a temporary and convert.
        let mut img = Tensor::<u8>::image(w, h, dest_fmt, memory)?;
        if converted_fmt != dest_fmt {
            let tmp = Tensor::<u8>::image(w, h, converted_fmt, Some(TensorMemory::Mem))?;
            decoder.decode_into(&mut tmp.map()?)?;
            CPUProcessor::convert_format_pf(&tmp, &mut img, converted_fmt, dest_fmt)?;
            return Ok(TensorDyn::from(img));
        }
        decoder.decode_into(&mut img.map()?)?;
        return Ok(TensorDyn::from(img));
    }
    // EXIF orientation present: decode (and convert) into system memory,
    // then rotate/flip into the final tensor.
    let mut tmp = Tensor::<u8>::image(w, h, dest_fmt, Some(TensorMemory::Mem))?;
    if converted_fmt != dest_fmt {
        let tmp2 = Tensor::<u8>::image(w, h, converted_fmt, Some(TensorMemory::Mem))?;
        decoder.decode_into(&mut tmp2.map()?)?;
        CPUProcessor::convert_format_pf(&tmp2, &mut tmp, converted_fmt, dest_fmt)?;
    } else {
        decoder.decode_into(&mut tmp.map()?)?;
    }
    rotate_flip_to_dyn(&tmp, dest_fmt, rotation, flip, memory)
}
/// Decode a PNG byte stream into a tensor image.
///
/// `format` selects the output pixel format (default RGB); `memory` the
/// backing store. Luma+alpha sources are reduced to grey by dropping alpha.
/// EXIF orientation is honored; on linux a stride-padded DMA tensor is
/// produced when a GPU-aligned pitch is required.
fn load_png(
    image: &[u8],
    format: Option<PixelFormat>,
    memory: Option<TensorMemory>,
) -> Result<TensorDyn> {
    let dest_fmt = format.unwrap_or(PixelFormat::Rgb);
    // Decode exactly what's in the file: no implicit alpha, first frame only.
    let options = DecoderOptions::default()
        .png_set_add_alpha_channel(false)
        .png_set_decode_animated(false);
    let mut decoder = PngDecoder::new_with_options(image, options);
    decoder.decode_headers()?;
    let (width, height, rotation, flip) = {
        let info = decoder
            .get_info()
            .ok_or_else(|| Error::Internal("PNG did not return decoded image info".to_string()))?;
        let (rot, flip) = info
            .exif
            .as_ref()
            .map(|x| read_exif_orientation(x))
            .unwrap_or((Rotation::None, Flip::None));
        (info.width, info.height, rot, flip)
    };
    let decoder_cs = decoder
        .get_colorspace()
        .ok_or_else(|| Error::Internal("PNG decoder did not return colorspace".to_string()))?;
    // LumaA has no PixelFormat equivalent; flag it so the alpha channel can
    // be stripped after decode.
    let (decoded_fmt, strip_luma_alpha) = match decoder_cs {
        ColorSpace::Luma => (PixelFormat::Grey, false),
        ColorSpace::LumaA => (PixelFormat::Grey, true),
        ColorSpace::RGB => (PixelFormat::Rgb, false),
        ColorSpace::RGBA => (PixelFormat::Rgba, false),
        other => {
            return Err(Error::NotSupported(format!(
                "PNG decoder produced unsupported colorspace {other:?}"
            )));
        }
    };
    // Fail early if the CPU converter cannot bridge decoder output to the
    // requested format.
    if decoded_fmt != dest_fmt
        && !crate::cpu::CPUProcessor::support_conversion_pf(decoded_fmt, dest_fmt)
    {
        return Err(Error::NotSupported(format!(
            "load_png: cannot convert decoder output {decoded_fmt:?} to {dest_fmt:?}"
        )));
    }
    let staging = if strip_luma_alpha {
        // Decode the 2-channel luma+alpha raster, then keep only channel 0.
        let raw = Tensor::<u8>::new(&[height, width, 2], Some(TensorMemory::Mem), None)?;
        decoder.decode_into(&mut raw.map()?)?;
        let grey = Tensor::<u8>::image(width, height, PixelFormat::Grey, Some(TensorMemory::Mem))?;
        {
            let raw_map = raw.map()?;
            let mut grey_map = grey.map()?;
            let raw_bytes: &[u8] = &raw_map;
            let grey_bytes: &mut [u8] = &mut grey_map;
            for (pair, out) in raw_bytes.chunks_exact(2).zip(grey_bytes.iter_mut()) {
                *out = pair[0];
            }
        }
        grey
    } else {
        let staging = Tensor::<u8>::image(width, height, decoded_fmt, Some(TensorMemory::Mem))?;
        decoder.decode_into(&mut staging.map()?)?;
        staging
    };
    // Convert to the requested format in system memory if needed.
    let packed = if decoded_fmt != dest_fmt {
        let mut tmp = Tensor::<u8>::image(width, height, dest_fmt, Some(TensorMemory::Mem))?;
        CPUProcessor::convert_format_pf(&staging, &mut tmp, decoded_fmt, dest_fmt)?;
        tmp
    } else {
        staging
    };
    if (rotation, flip) != (Rotation::None, Flip::None) {
        return rotate_flip_to_dyn(&packed, dest_fmt, rotation, flip, memory);
    }
    // Linux-only padded DMA destination (see load_jpeg for the same pattern).
    #[cfg(target_os = "linux")]
    if let Some(aligned_pitch) = padded_dma_pitch_for(dest_fmt, width, &memory) {
        let mut dma = Tensor::<u8>::image_with_stride(
            width,
            height,
            dest_fmt,
            aligned_pitch,
            Some(TensorMemory::Dma),
        )?;
        copy_packed_to_padded_dma(&packed, &mut dma)?;
        return Ok(TensorDyn::from(dma));
    }
    // Already in system memory: hand the staging tensor back directly.
    if matches!(memory, Some(TensorMemory::Mem)) {
        return Ok(TensorDyn::from(packed));
    }
    // Otherwise allocate in the requested memory and copy the packed bytes.
    let out = Tensor::<u8>::image(width, height, dest_fmt, memory)?;
    {
        let src_map = packed.map()?;
        let mut dst_map = out.map()?;
        let src_bytes: &[u8] = &src_map;
        let dst_bytes: &mut [u8] = &mut dst_map;
        dst_bytes.copy_from_slice(src_bytes);
    }
    Ok(TensorDyn::from(out))
}
pub fn load_image(
image: &[u8],
format: Option<PixelFormat>,
memory: Option<TensorMemory>,
) -> Result<TensorDyn> {
if let Ok(i) = load_jpeg(image, format, memory) {
return Ok(i);
}
if let Ok(i) = load_png(image, format, memory) {
return Ok(i);
}
Err(Error::NotSupported(
"Could not decode as jpeg or png".to_string(),
))
}
pub fn save_jpeg(tensor: &TensorDyn, path: impl AsRef<std::path::Path>, quality: u8) -> Result<()> {
let t = tensor.as_u8().ok_or(Error::UnsupportedFormat(
"save_jpeg requires u8 tensor".to_string(),
))?;
let fmt = t.format().ok_or(Error::NotAnImage)?;
if fmt.layout() != PixelLayout::Packed {
return Err(Error::NotImplemented(
"Saving planar images is not supported".to_string(),
));
}
let colour = match fmt {
PixelFormat::Rgb => jpeg_encoder::ColorType::Rgb,
PixelFormat::Rgba => jpeg_encoder::ColorType::Rgba,
_ => {
return Err(Error::NotImplemented(
"Unsupported image format for saving".to_string(),
));
}
};
let w = t.width().ok_or(Error::NotAnImage)?;
let h = t.height().ok_or(Error::NotAnImage)?;
let encoder = jpeg_encoder::Encoder::new_file(path, quality)?;
let tensor_map = t.map()?;
encoder.encode(&tensor_map, w as u16, h as u16, colour)?;
Ok(())
}
/// RAII helper that trace-logs how long a scope took: construct it with a
/// label at scope entry; the elapsed time is logged when it is dropped.
pub(crate) struct FunctionTimer<T: Display> {
    name: T, // label printed in the trace message
    start: std::time::Instant, // capture time, set in `new`
}
impl<T: Display> FunctionTimer<T> {
    /// Start a timer labelled `name`; elapsed time is trace-logged on drop.
    pub fn new(name: T) -> Self {
        let start = std::time::Instant::now();
        Self { name, start }
    }
}
impl<T: Display> Drop for FunctionTimer<T> {
    /// Emit the elapsed time for the labelled scope at trace level.
    fn drop(&mut self) {
        let elapsed = self.start.elapsed();
        log::trace!("{} elapsed: {:?}", self.name, elapsed)
    }
}
/// Default overlay palette: 20 RGBA colors with components normalized to
/// `[0.0, 1.0]` and a uniform 0.7 alpha. The u8 form is derived below in
/// `DEFAULT_COLORS_U8` via `denorm`.
const DEFAULT_COLORS: [[f32; 4]; 20] = [
    [0., 1., 0., 0.7],
    [1., 0.5568628, 0., 0.7],
    [0.25882353, 0.15294118, 0.13333333, 0.7],
    [0.8, 0.7647059, 0.78039216, 0.7],
    [0.3137255, 0.3137255, 0.3137255, 0.7],
    [0.1411765, 0.3098039, 0.1215686, 0.7],
    [1., 0.95686275, 0.5137255, 0.7],
    [0.3529412, 0.32156863, 0., 0.7],
    [0.4235294, 0.6235294, 0.6509804, 0.7],
    [0.5098039, 0.5098039, 0.7294118, 0.7],
    [0.00784314, 0.18823529, 0.29411765, 0.7],
    [0.0, 0.2706, 1.0, 0.7],
    [0.0, 0.0, 0.0, 0.7],
    [0.0, 0.5, 0.0, 0.7],
    [1.0, 0.0, 0.0, 0.7],
    [0.0, 0.0, 1.0, 0.7],
    [1.0, 0.5, 0.5, 0.7],
    [0.1333, 0.5451, 0.1333, 0.7],
    [0.1176, 0.4118, 0.8235, 0.7],
    [1., 1., 1., 0.7],
];
/// Convert a normalized `[0.0, 1.0]` color table into `u8` components by
/// scaling each entry to `0..=255` and rounding to nearest.
///
/// `const fn`, so iteration is limited to `while` loops.
const fn denorm<const M: usize, const N: usize>(a: [[f32; M]; N]) -> [[u8; M]; N] {
    let mut out = [[0u8; M]; N];
    let mut row = 0;
    while row < N {
        let mut col = 0;
        while col < M {
            out[row][col] = (a[row][col] * 255.0).round() as u8;
            col += 1;
        }
        row += 1;
    }
    out
}
// Compile-time u8 version of the default palette (0..=255 per component).
const DEFAULT_COLORS_U8: [[u8; 4]; 20] = denorm(DEFAULT_COLORS);
#[cfg(test)]
#[cfg_attr(coverage_nightly, coverage(off))]
mod alignment_tests {
    use super::*;

    /// RGBA8: 64-byte pitch / 4 bpp → widths align to 16 pixels.
    #[test]
    fn align_width_rgba8_common_widths() {
        // Already-aligned widths pass through untouched.
        assert_eq!(align_width_for_gpu_pitch(640, 4), 640);
        assert_eq!(align_width_for_gpu_pitch(1280, 4), 1280);
        assert_eq!(align_width_for_gpu_pitch(1920, 4), 1920);
        assert_eq!(align_width_for_gpu_pitch(3840, 4), 3840);
        // Misaligned widths round up to the next 16-pixel boundary.
        assert_eq!(align_width_for_gpu_pitch(3004, 4), 3008);
        assert_eq!(align_width_for_gpu_pitch(3000, 4), 3008);
        assert_eq!(align_width_for_gpu_pitch(17, 4), 32);
        assert_eq!(align_width_for_gpu_pitch(1, 4), 16);
    }

    /// RGB888: lcm(64, 3) = 192 bytes → widths align to 64 pixels.
    #[test]
    fn align_width_rgb888_packed() {
        assert_eq!(align_width_for_gpu_pitch(64, 3), 64);
        assert_eq!(align_width_for_gpu_pitch(640, 3), 640);
        assert_eq!(align_width_for_gpu_pitch(1, 3), 64);
        assert_eq!(align_width_for_gpu_pitch(65, 3), 128);
        for width in [3004usize, 1281, 100, 17] {
            let padded = align_width_for_gpu_pitch(width, 3);
            assert!(padded >= width);
            // The padded pitch must satisfy both alignments.
            assert_eq!((padded * 3) % 64, 0);
            assert_eq!((padded * 3) % 3, 0);
        }
    }

    /// Grey8: 1 bpp → widths align directly to 64 pixels.
    #[test]
    fn align_width_grey_u8() {
        assert_eq!(align_width_for_gpu_pitch(64, 1), 64);
        assert_eq!(align_width_for_gpu_pitch(640, 1), 640);
        assert_eq!(align_width_for_gpu_pitch(1, 1), 64);
        assert_eq!(align_width_for_gpu_pitch(65, 1), 128);
    }

    /// Zero width or zero bpp are passed through unchanged.
    #[test]
    fn align_width_zero_inputs() {
        assert_eq!(align_width_for_gpu_pitch(0, 4), 0);
        assert_eq!(align_width_for_gpu_pitch(640, 0), 640);
    }

    /// Even at overflow-prone extremes the result is never below the input.
    #[test]
    fn align_width_never_returns_smaller_than_input() {
        let widths = [
            1usize,
            17,
            64,
            65,
            100,
            1280,
            1281,
            1920,
            3004,
            3072,
            3840,
            usize::MAX / 8,
            usize::MAX / 4,
            usize::MAX / 2,
            usize::MAX - 1,
            usize::MAX,
        ];
        for bpp in [1usize, 2, 3, 4, 8] {
            for &w in &widths {
                let aligned = align_width_for_gpu_pitch(w, bpp);
                assert!(
                    aligned >= w,
                    "align_width_for_gpu_pitch({w}, {bpp}) = {aligned} < {w}"
                );
            }
        }
    }

    /// Widths near usize::MAX must either stay put (already aligned) or be
    /// returned unaligned when padding would overflow — never rounded down.
    #[test]
    fn align_width_overflow_returns_unaligned_not_smaller() {
        // usize::MAX - 15 is an exact multiple of 16 pixels.
        let aligned_extreme = usize::MAX - 15;
        assert_eq!(
            align_width_for_gpu_pitch(aligned_extreme, 4),
            aligned_extreme
        );
        let misaligned_extreme = usize::MAX - 1;
        let result = align_width_for_gpu_pitch(misaligned_extreme, 4);
        assert!(
            result >= misaligned_extreme,
            "extreme misaligned width must not be rounded down to {result}"
        );
    }

    /// lcm helper: basic values, zero inputs, and overflow detection.
    #[test]
    fn checked_lcm_basic_and_overflow() {
        assert_eq!(checked_num_integer_lcm(64, 4), Some(64));
        assert_eq!(checked_num_integer_lcm(64, 3), Some(192));
        assert_eq!(checked_num_integer_lcm(64, 1), Some(64));
        assert_eq!(checked_num_integer_lcm(0, 4), Some(0));
        assert_eq!(checked_num_integer_lcm(64, 0), Some(0));
        assert_eq!(
            checked_num_integer_lcm(usize::MAX, usize::MAX - 1),
            None,
            "coprime extreme values must overflow detect, not panic"
        );
    }

    /// Bytes-per-pixel of the primary plane for known format layouts.
    #[test]
    fn primary_plane_bpp_known_formats() {
        assert_eq!(primary_plane_bpp(PixelFormat::Rgba, 1), Some(4));
        assert_eq!(primary_plane_bpp(PixelFormat::Bgra, 1), Some(4));
        assert_eq!(primary_plane_bpp(PixelFormat::Rgb, 1), Some(3));
        assert_eq!(primary_plane_bpp(PixelFormat::Grey, 1), Some(1));
        assert_eq!(primary_plane_bpp(PixelFormat::Nv12, 1), Some(1));
    }
}
#[cfg(test)]
#[cfg_attr(coverage_nightly, coverage(off))]
mod image_tests {
use super::*;
use crate::{CPUProcessor, Rotation};
#[cfg(target_os = "linux")]
use edgefirst_tensor::is_dma_available;
use edgefirst_tensor::{TensorMapTrait, TensorMemory, TensorTrait};
use image::buffer::ConvertBuffer;
/// Run `proc.convert` on dynamic tensors and hand both tensors back to the
/// caller, re-attaching each tensor's pixel format (which is lost across the
/// `into_u8` / `TensorDyn::from` round trip).
fn convert_img(
    proc: &mut dyn ImageProcessorTrait,
    src: TensorDyn,
    dst: TensorDyn,
    rotation: Rotation,
    flip: Flip,
    crop: Crop,
) -> (Result<()>, TensorDyn, TensorDyn) {
    let src_fourcc = src.format().unwrap();
    let dst_fourcc = dst.format().unwrap();
    let src_dyn = src;
    let mut dst_dyn = dst;
    let result = proc.convert(&src_dyn, &mut dst_dyn, rotation, flip, crop);
    // Rebuild the tensors with their original formats so callers can keep
    // inspecting/comparing them after the conversion.
    let src_back = {
        let mut __t = src_dyn.into_u8().unwrap();
        __t.set_format(src_fourcc).unwrap();
        TensorDyn::from(__t)
    };
    let dst_back = {
        let mut __t = dst_dyn.into_u8().unwrap();
        __t.set_format(dst_fourcc).unwrap();
        TensorDyn::from(__t)
    };
    (result, src_back, dst_back)
}
// One-time logger setup for the test binary; runs before any test via ctor.
#[ctor::ctor]
fn init() {
    env_logger::Builder::from_env(env_logger::Env::default().default_filter_or("info")).init();
}
// Expands to the name of the enclosing function (used to label image
// comparisons) by inspecting `type_name` of a nested fn.
macro_rules! function {
    () => {{
        fn f() {}
        fn type_name_of<T>(_: T) -> &'static str {
            std::any::type_name::<T>()
        }
        let name = type_name_of(f);
        // Strip the trailing "::f" and any leading module path.
        match &name[..name.len() - 3].rfind(':') {
            Some(pos) => &name[pos + 1..name.len() - 3],
            None => &name[..name.len() - 3],
        }
    }};
}
// Crop rectangles exceeding the image bounds must be rejected with a
// CropInvalid error naming the offending rect(s).
#[test]
fn test_invalid_crop() {
    let src = TensorDyn::image(100, 100, PixelFormat::Rgb, DType::U8, None).unwrap();
    let dst = TensorDyn::image(100, 100, PixelFormat::Rgb, DType::U8, None).unwrap();
    // Both rects out of bounds (src 50+60 > 100, dst 150 > 100).
    let crop = Crop::new()
        .with_src_rect(Some(Rect::new(50, 50, 60, 60)))
        .with_dst_rect(Some(Rect::new(0, 0, 150, 150)));
    let result = crop.check_crop_dyn(&src, &dst);
    assert!(matches!(
        result,
        Err(Error::CropInvalid(e)) if e.starts_with("Dest and Src crop invalid")
    ));
    // Only the dst rect remains invalid.
    let crop = crop.with_src_rect(Some(Rect::new(0, 0, 10, 10)));
    let result = crop.check_crop_dyn(&src, &dst);
    assert!(matches!(
        result,
        Err(Error::CropInvalid(e)) if e.starts_with("Dest crop invalid")
    ));
    // Only the src rect remains invalid.
    let crop = crop
        .with_src_rect(Some(Rect::new(50, 50, 60, 60)))
        .with_dst_rect(Some(Rect::new(0, 0, 50, 50)));
    let result = crop.check_crop_dyn(&src, &dst);
    assert!(matches!(
        result,
        Err(Error::CropInvalid(e)) if e.starts_with("Src crop invalid")
    ));
    // 50+50 == 100 touches the edge exactly and is accepted.
    let crop = crop.with_src_rect(Some(Rect::new(50, 50, 50, 50)));
    let result = crop.check_crop_dyn(&src, &dst);
    assert!(result.is_ok());
}
// set_format must reject tensors whose shape can't carry the pixel format.
#[test]
fn test_invalid_tensor_format() -> Result<(), Error> {
    let mut tensor = Tensor::<u8>::new(&[720, 1280, 4, 1], None, None)?;
    let result = tensor.set_format(PixelFormat::Rgb);
    assert!(result.is_err(), "4D tensor should reject set_format");
    let mut tensor = Tensor::<u8>::new(&[720, 1280, 4], None, None)?;
    let result = tensor.set_format(PixelFormat::Rgb);
    assert!(result.is_err(), "4-channel tensor should reject RGB format");
    Ok(())
}
// Garbage bytes must fail both decoders with the combined NotSupported error.
#[test]
fn test_invalid_image_file() -> Result<(), Error> {
    let result = crate::load_image(&[123; 5000], None, None);
    assert!(matches!(
        result,
        Err(Error::NotSupported(e)) if e == "Could not decode as jpeg or png"));
    Ok(())
}
// An unsupported requested format on garbage also surfaces as decode failure.
#[test]
fn test_invalid_jpeg_format() -> Result<(), Error> {
    let result = crate::load_image(&[123; 5000], Some(PixelFormat::Yuyv), None);
    assert!(matches!(
        result,
        Err(Error::NotSupported(e)) if e == "Could not decode as jpeg or png"));
    Ok(())
}
// Round-trip: load JPEG (1280x720) → CPU resize to 640x360 → save → reload,
// checking dimensions and the default RGB load format.
#[test]
fn test_load_resize_save() {
    let file = include_bytes!(concat!(
        env!("CARGO_MANIFEST_DIR"),
        "/../../testdata/zidane.jpg"
    ));
    let img = crate::load_image(file, Some(PixelFormat::Rgba), None).unwrap();
    assert_eq!(img.width(), Some(1280));
    assert_eq!(img.height(), Some(720));
    let dst = TensorDyn::image(640, 360, PixelFormat::Rgba, DType::U8, None).unwrap();
    let mut converter = CPUProcessor::new();
    let (result, _img, dst) = convert_img(
        &mut converter,
        img,
        dst,
        Rotation::None,
        Flip::None,
        Crop::no_crop(),
    );
    result.unwrap();
    assert_eq!(dst.width(), Some(640));
    assert_eq!(dst.height(), Some(360));
    crate::save_jpeg(&dst, "zidane_resized.jpg", 80).unwrap();
    let file = std::fs::read("zidane_resized.jpg").unwrap();
    let img = crate::load_image(&file, None, None).unwrap();
    assert_eq!(img.width(), Some(640));
    assert_eq!(img.height(), Some(360));
    assert_eq!(img.format().unwrap(), PixelFormat::Rgb);
}
// A planar RGB tensor must render equivalently to the packed RGBA reference.
#[test]
fn test_from_tensor_planar() -> Result<(), Error> {
    let mut tensor = Tensor::new(&[3, 720, 1280], None, None)?;
    tensor.map()?.copy_from_slice(include_bytes!(concat!(
        env!("CARGO_MANIFEST_DIR"),
        "/../../testdata/camera720p.8bps"
    )));
    let planar = {
        tensor
            .set_format(PixelFormat::PlanarRgb)
            .map_err(|e| crate::Error::Internal(e.to_string()))?;
        TensorDyn::from(tensor)
    };
    let rbga = load_bytes_to_tensor(
        1280,
        720,
        PixelFormat::Rgba,
        None,
        include_bytes!(concat!(
            env!("CARGO_MANIFEST_DIR"),
            "/../../testdata/camera720p.rgba"
        )),
    )?;
    compare_images_convert_to_rgb(&planar, &rbga, 0.98, function!());
    Ok(())
}
// Unknown fourcc codes must not map to a PixelFormat.
#[test]
fn test_from_tensor_invalid_format() {
    assert!(PixelFormat::from_fourcc(u32::from_le_bytes(*b"TEST")).is_none());
}
// save_jpeg only supports packed layouts; planar RGB must fail to save.
#[test]
#[should_panic(expected = "Failed to save planar RGB image")]
fn test_save_planar() {
    let planar_img = load_bytes_to_tensor(
        1280,
        720,
        PixelFormat::PlanarRgb,
        None,
        include_bytes!(concat!(
            env!("CARGO_MANIFEST_DIR"),
            "/../../testdata/camera720p.8bps"
        )),
    )
    .unwrap();
    let save_path = "/tmp/planar_rgb.jpg";
    crate::save_jpeg(&planar_img, save_path, 90).expect("Failed to save planar RGB image");
}
// YUYV is packed but not an encoder colour type; saving must also fail.
#[test]
#[should_panic(expected = "Failed to save YUYV image")]
fn test_save_yuyv() {
    let planar_img = load_bytes_to_tensor(
        1280,
        720,
        PixelFormat::Yuyv,
        None,
        include_bytes!(concat!(
            env!("CARGO_MANIFEST_DIR"),
            "/../../testdata/camera720p.yuyv"
        )),
    )
    .unwrap();
    let save_path = "/tmp/yuyv.jpg";
    crate::save_jpeg(&planar_img, save_path, 90).expect("Failed to save YUYV image");
}
// Degrees → Rotation mapping, including wrap-around past 360.
#[test]
fn test_rotation_angle() {
    assert_eq!(Rotation::from_degrees_clockwise(0), Rotation::None);
    assert_eq!(Rotation::from_degrees_clockwise(90), Rotation::Clockwise90);
    assert_eq!(Rotation::from_degrees_clockwise(180), Rotation::Rotate180);
    assert_eq!(
        Rotation::from_degrees_clockwise(270),
        Rotation::CounterClockwise90
    );
    assert_eq!(Rotation::from_degrees_clockwise(360), Rotation::None);
    assert_eq!(Rotation::from_degrees_clockwise(450), Rotation::Clockwise90);
    assert_eq!(Rotation::from_degrees_clockwise(540), Rotation::Rotate180);
    assert_eq!(
        Rotation::from_degrees_clockwise(630),
        Rotation::CounterClockwise90
    );
}
// Angles that are not multiples of 90 are a programming error and panic.
#[test]
#[should_panic(expected = "rotation angle is not a multiple of 90")]
fn test_rotation_angle_panic() {
    Rotation::from_degrees_clockwise(361);
}
// The EDGEFIRST_DISABLE_* env vars must be honored by ImageProcessor::new;
// each variable is saved and restored so sibling tests are unaffected.
#[test]
fn test_disable_env_var() -> Result<(), Error> {
    // Clear any forced backend first so only the disable flags matter.
    let saved_force = std::env::var("EDGEFIRST_FORCE_BACKEND").ok();
    unsafe { std::env::remove_var("EDGEFIRST_FORCE_BACKEND") };
    #[cfg(target_os = "linux")]
    {
        let original = std::env::var("EDGEFIRST_DISABLE_G2D").ok();
        unsafe { std::env::set_var("EDGEFIRST_DISABLE_G2D", "1") };
        let converter = ImageProcessor::new()?;
        match original {
            Some(s) => unsafe { std::env::set_var("EDGEFIRST_DISABLE_G2D", s) },
            None => unsafe { std::env::remove_var("EDGEFIRST_DISABLE_G2D") },
        }
        assert!(converter.g2d.is_none());
    }
    #[cfg(target_os = "linux")]
    #[cfg(feature = "opengl")]
    {
        let original = std::env::var("EDGEFIRST_DISABLE_GL").ok();
        unsafe { std::env::set_var("EDGEFIRST_DISABLE_GL", "1") };
        let converter = ImageProcessor::new()?;
        match original {
            Some(s) => unsafe { std::env::set_var("EDGEFIRST_DISABLE_GL", s) },
            None => unsafe { std::env::remove_var("EDGEFIRST_DISABLE_GL") },
        }
        assert!(converter.opengl.is_none());
    }
    let original = std::env::var("EDGEFIRST_DISABLE_CPU").ok();
    unsafe { std::env::set_var("EDGEFIRST_DISABLE_CPU", "1") };
    let converter = ImageProcessor::new()?;
    match original {
        Some(s) => unsafe { std::env::set_var("EDGEFIRST_DISABLE_CPU", s) },
        None => unsafe { std::env::remove_var("EDGEFIRST_DISABLE_CPU") },
    }
    assert!(converter.cpu.is_none());
    // With every backend disabled, convert must fail with NoConverter.
    let original_cpu = std::env::var("EDGEFIRST_DISABLE_CPU").ok();
    unsafe { std::env::set_var("EDGEFIRST_DISABLE_CPU", "1") };
    let original_gl = std::env::var("EDGEFIRST_DISABLE_GL").ok();
    unsafe { std::env::set_var("EDGEFIRST_DISABLE_GL", "1") };
    let original_g2d = std::env::var("EDGEFIRST_DISABLE_G2D").ok();
    unsafe { std::env::set_var("EDGEFIRST_DISABLE_G2D", "1") };
    let mut converter = ImageProcessor::new()?;
    let src = TensorDyn::image(1280, 720, PixelFormat::Rgba, DType::U8, None)?;
    let dst = TensorDyn::image(640, 360, PixelFormat::Rgba, DType::U8, None)?;
    let (result, _src, _dst) = convert_img(
        &mut converter,
        src,
        dst,
        Rotation::None,
        Flip::None,
        Crop::no_crop(),
    );
    assert!(matches!(result, Err(Error::NoConverter)));
    // Restore every variable to its pre-test value.
    match original_cpu {
        Some(s) => unsafe { std::env::set_var("EDGEFIRST_DISABLE_CPU", s) },
        None => unsafe { std::env::remove_var("EDGEFIRST_DISABLE_CPU") },
    }
    match original_gl {
        Some(s) => unsafe { std::env::set_var("EDGEFIRST_DISABLE_GL", s) },
        None => unsafe { std::env::remove_var("EDGEFIRST_DISABLE_GL") },
    }
    match original_g2d {
        Some(s) => unsafe { std::env::set_var("EDGEFIRST_DISABLE_G2D", s) },
        None => unsafe { std::env::remove_var("EDGEFIRST_DISABLE_G2D") },
    }
    match saved_force {
        Some(s) => unsafe { std::env::set_var("EDGEFIRST_FORCE_BACKEND", s) },
        None => unsafe { std::env::remove_var("EDGEFIRST_FORCE_BACKEND") },
    }
    Ok(())
}
// NV12→NV12 is not implemented by any backend and must report NotSupported.
#[test]
fn test_unsupported_conversion() {
    let src = TensorDyn::image(1280, 720, PixelFormat::Nv12, DType::U8, None).unwrap();
    let dst = TensorDyn::image(640, 360, PixelFormat::Nv12, DType::U8, None).unwrap();
    let mut converter = ImageProcessor::new().unwrap();
    let (result, _src, _dst) = convert_img(
        &mut converter,
        src,
        dst,
        Rotation::None,
        Flip::None,
        Crop::no_crop(),
    );
    log::debug!("result: {:?}", result);
    assert!(matches!(
        result,
        Err(Error::NotSupported(e)) if e.starts_with("Conversion from NV12 to NV12")
    ));
}
// A grey JPEG and an RGB-encoded grey JPEG must decode to near-identical RGBA.
#[test]
fn test_load_grey() {
    let grey_img = crate::load_image(
        include_bytes!(concat!(
            env!("CARGO_MANIFEST_DIR"),
            "/../../testdata/grey.jpg"
        )),
        Some(PixelFormat::Rgba),
        None,
    )
    .unwrap();
    let grey_but_rgb_img = crate::load_image(
        include_bytes!(concat!(
            env!("CARGO_MANIFEST_DIR"),
            "/../../testdata/grey-rgb.jpg"
        )),
        Some(PixelFormat::Rgba),
        None,
    )
    .unwrap();
    compare_images(&grey_img, &grey_but_rgb_img, 0.99, function!());
}
// An NV12 tensor reports single-channel, (semi-)planar layout metadata.
#[test]
fn test_new_nv12() {
    let nv12 = TensorDyn::image(1280, 720, PixelFormat::Nv12, DType::U8, None).unwrap();
    assert_eq!(nv12.height(), Some(720));
    assert_eq!(nv12.width(), Some(1280));
    assert_eq!(nv12.format().unwrap(), PixelFormat::Nv12);
    assert_eq!(nv12.format().unwrap().channels(), 1);
    assert!(nv12.format().is_some_and(
        |f| f.layout() == PixelLayout::Planar || f.layout() == PixelLayout::SemiPlanar
    ))
}
// The full ImageProcessor must produce output equivalent to the pure CPU
// backend for a plain RGBA resize.
#[test]
#[cfg(target_os = "linux")]
fn test_new_image_converter() {
    let dst_width = 640;
    let dst_height = 360;
    let file = include_bytes!(concat!(
        env!("CARGO_MANIFEST_DIR"),
        "/../../testdata/zidane.jpg"
    ))
    .to_vec();
    let src = crate::load_image(&file, Some(PixelFormat::Rgba), None).unwrap();
    let mut converter = ImageProcessor::new().unwrap();
    let converter_dst = converter
        .create_image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None)
        .unwrap();
    let (result, src, converter_dst) = convert_img(
        &mut converter,
        src,
        converter_dst,
        Rotation::None,
        Flip::None,
        Crop::no_crop(),
    );
    result.unwrap();
    // Reference result from the CPU backend for comparison.
    let cpu_dst =
        TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
    let mut cpu_converter = CPUProcessor::new();
    let (result, _src, cpu_dst) = convert_img(
        &mut cpu_converter,
        src,
        cpu_dst,
        Rotation::None,
        Flip::None,
        Crop::no_crop(),
    );
    result.unwrap();
    compare_images(&converter_dst, &cpu_dst, 0.98, function!());
}
// create_image must honor the requested dtype (I8 and U8) and still convert.
#[test]
#[cfg(target_os = "linux")]
fn test_create_image_dtype_i8() {
    let mut converter = ImageProcessor::new().unwrap();
    let dst = converter
        .create_image(320, 240, PixelFormat::Rgb, DType::I8, None)
        .unwrap();
    assert_eq!(dst.dtype(), DType::I8);
    assert!(dst.width() == Some(320));
    assert!(dst.height() == Some(240));
    assert_eq!(dst.format(), Some(PixelFormat::Rgb));
    let dst_u8 = converter
        .create_image(320, 240, PixelFormat::Rgb, DType::U8, None)
        .unwrap();
    assert_eq!(dst_u8.dtype(), DType::U8);
    let file = include_bytes!(concat!(
        env!("CARGO_MANIFEST_DIR"),
        "/../../testdata/zidane.jpg"
    ))
    .to_vec();
    let src = crate::load_image(&file, Some(PixelFormat::Rgba), None).unwrap();
    let mut dst_i8 = converter
        .create_image(320, 240, PixelFormat::Rgb, DType::I8, None)
        .unwrap();
    converter
        .convert(
            &src,
            &mut dst_i8,
            Rotation::None,
            Flip::None,
            Crop::no_crop(),
        )
        .unwrap();
}
// NV12 DMA images with a non-aligned width must never go through the
// stride-padded create path.
#[test]
#[cfg(target_os = "linux")]
fn test_create_image_nv12_dma_non_aligned_width() {
    let converter = ImageProcessor::new().unwrap();
    let result = converter.create_image(
        100,
        64,
        PixelFormat::Nv12,
        DType::U8,
        Some(TensorMemory::Dma),
    );
    match result {
        Ok(img) => {
            assert_eq!(img.width(), Some(100));
            assert_eq!(img.height(), Some(64));
            assert_eq!(img.format(), Some(PixelFormat::Nv12));
            assert!(
                img.row_stride().is_none(),
                "NV12 must not be stride-padded by create_image",
            );
        }
        Err(e) => {
            // DMA may be unavailable; the failure just must not come from
            // the stride-padded allocation path.
            let msg = format!("{e}");
            assert!(
                !msg.contains("image_with_stride"),
                "NV12 should not hit the stride-padded path: {msg}",
            );
        }
    }
}
// Cropped convert through the full processor must match the CPU backend.
// Currently #[ignore]d; run explicitly with `cargo test -- --ignored`.
#[test]
#[ignore] fn test_crop_skip() {
    let file = include_bytes!(concat!(
        env!("CARGO_MANIFEST_DIR"),
        "/../../testdata/zidane.jpg"
    ))
    .to_vec();
    let src = crate::load_image(&file, Some(PixelFormat::Rgba), None).unwrap();
    let mut converter = ImageProcessor::new().unwrap();
    let converter_dst = converter
        .create_image(1280, 720, PixelFormat::Rgba, DType::U8, None)
        .unwrap();
    let crop = Crop::new()
        .with_src_rect(Some(Rect::new(0, 0, 640, 640)))
        .with_dst_rect(Some(Rect::new(0, 0, 640, 640)));
    let (result, src, converter_dst) = convert_img(
        &mut converter,
        src,
        converter_dst,
        Rotation::None,
        Flip::None,
        crop,
    );
    result.unwrap();
    let cpu_dst = TensorDyn::image(1280, 720, PixelFormat::Rgba, DType::U8, None).unwrap();
    let mut cpu_converter = CPUProcessor::new();
    let (result, _src, cpu_dst) = convert_img(
        &mut cpu_converter,
        src,
        cpu_dst,
        Rotation::None,
        Flip::None,
        crop,
    );
    result.unwrap();
    compare_images(&converter_dst, &cpu_dst, 0.99999, function!());
}
// Unknown fourcc codes must not map to a PixelFormat.
#[test]
fn test_invalid_pixel_format() {
    assert!(PixelFormat::from_fourcc(u32::from_le_bytes(*b"TEST")).is_none());
}
#[cfg(target_os = "linux")]
static G2D_AVAILABLE: std::sync::OnceLock<bool> = std::sync::OnceLock::new();
/// Probe (once per test binary) whether a G2D device can actually be opened.
#[cfg(target_os = "linux")]
fn is_g2d_available() -> bool {
    *G2D_AVAILABLE.get_or_init(|| G2DProcessor::new().is_ok())
}
#[cfg(target_os = "linux")]
#[cfg(feature = "opengl")]
static GL_AVAILABLE: std::sync::OnceLock<bool> = std::sync::OnceLock::new();
/// Probe (once per test binary) whether an OpenGL context can be created.
#[cfg(target_os = "linux")]
#[cfg(feature = "opengl")]
fn is_opengl_available() -> bool {
    // This function only compiles under linux+opengl, so the previous inner
    // #[cfg(all(...))] / #[cfg(not(all(...)))] split was dead code: the
    // `false` branch could never be compiled. Probe unconditionally.
    *GL_AVAILABLE.get_or_init(|| GLProcessorThreaded::new(None).is_ok())
}
// A JPEG with EXIF orientation 6 (90° CW) must load pre-rotated and match a
// CPU-rotated copy of the unrotated original.
#[test]
fn test_load_jpeg_with_exif() {
    let file = include_bytes!(concat!(
        env!("CARGO_MANIFEST_DIR"),
        "/../../testdata/zidane_rotated_exif.jpg"
    ))
    .to_vec();
    let loaded = crate::load_image(&file, Some(PixelFormat::Rgba), None).unwrap();
    // 1280x720 source becomes 720x1280 after the EXIF rotation.
    assert_eq!(loaded.height(), Some(1280));
    assert_eq!(loaded.width(), Some(720));
    let file = include_bytes!(concat!(
        env!("CARGO_MANIFEST_DIR"),
        "/../../testdata/zidane.jpg"
    ))
    .to_vec();
    let cpu_src = crate::load_image(&file, Some(PixelFormat::Rgba), None).unwrap();
    let (dst_width, dst_height) = (cpu_src.height().unwrap(), cpu_src.width().unwrap());
    let cpu_dst =
        TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
    let mut cpu_converter = CPUProcessor::new();
    let (result, _cpu_src, cpu_dst) = convert_img(
        &mut cpu_converter,
        cpu_src,
        cpu_dst,
        Rotation::Clockwise90,
        Flip::None,
        Crop::no_crop(),
    );
    result.unwrap();
    compare_images(&loaded, &cpu_dst, 0.98, function!());
}
// Same check for PNG with EXIF orientation 3 (180°).
#[test]
fn test_load_png_with_exif() {
    let file = include_bytes!(concat!(
        env!("CARGO_MANIFEST_DIR"),
        "/../../testdata/zidane_rotated_exif_180.png"
    ))
    .to_vec();
    let loaded = crate::load_png(&file, Some(PixelFormat::Rgba), None).unwrap();
    // A 180° rotation keeps the dimensions.
    assert_eq!(loaded.height(), Some(720));
    assert_eq!(loaded.width(), Some(1280));
    let file = include_bytes!(concat!(
        env!("CARGO_MANIFEST_DIR"),
        "/../../testdata/zidane.jpg"
    ))
    .to_vec();
    let cpu_src = crate::load_image(&file, Some(PixelFormat::Rgba), None).unwrap();
    let cpu_dst = TensorDyn::image(1280, 720, PixelFormat::Rgba, DType::U8, None).unwrap();
    let mut cpu_converter = CPUProcessor::new();
    let (result, _cpu_src, cpu_dst) = convert_img(
        &mut cpu_converter,
        cpu_src,
        cpu_dst,
        Rotation::Rotate180,
        Flip::None,
        Crop::no_crop(),
    );
    result.unwrap();
    compare_images(&loaded, &cpu_dst, 0.98, function!());
}
/// Encode a deterministic synthetic RGB gradient of the given size as a JPEG
/// byte stream (quality 85) for decoder/pitch tests.
#[cfg(target_os = "linux")]
fn make_rgb_jpeg(width: u32, height: u32) -> Vec<u8> {
    let mut rgb = Vec::with_capacity((width * height * 3) as usize);
    for y in 0..height {
        for x in 0..width {
            let pixel = [
                ((x + y) & 0xFF) as u8,
                ((x.wrapping_mul(3)) & 0xFF) as u8,
                ((y.wrapping_mul(5)) & 0xFF) as u8,
            ];
            rgb.extend_from_slice(&pixel);
        }
    }
    let mut jpeg = Vec::new();
    let encoder = jpeg_encoder::Encoder::new(&mut jpeg, 85);
    encoder
        .encode(
            &rgb,
            width as u16,
            height as u16,
            jpeg_encoder::ColorType::Rgb,
        )
        .expect("jpeg-encoder must succeed on trivial input");
    jpeg
}
// End-to-end check that a pitch-padded DMA source (375px RGBA → 1536-byte
// stride) converts through the GL-capable processor identically to a plain
// Mem-backed CPU pipeline. Skipped when DMA is unavailable.
#[test]
#[cfg(target_os = "linux")]
#[cfg(feature = "opengl")]
fn test_convert_rgba_non_4_aligned_width_end_to_end() {
    use edgefirst_tensor::is_dma_available;
    if !is_dma_available() {
        eprintln!(
            "SKIPPED: test_convert_rgba_non_4_aligned_width_end_to_end — DMA not available"
        );
        return;
    }
    let jpeg = make_rgb_jpeg(375, 333);
    let src_gl = crate::load_jpeg(&jpeg, Some(PixelFormat::Rgba), None).unwrap();
    assert_eq!(src_gl.width(), Some(375));
    // 375 * 4 = 1500 bytes, padded up to the 64-byte-aligned 1536.
    let stride = src_gl.row_stride().unwrap();
    assert_eq!(stride, 1536, "expected padded pitch 1536, got {stride}");
    let mut gl_proc = ImageProcessor::new().unwrap();
    let gl_dst = gl_proc
        .create_image(640, 640, PixelFormat::Rgba, DType::U8, None)
        .unwrap();
    let (r_gl, _src_gl, gl_dst) = convert_img(
        &mut gl_proc,
        src_gl,
        gl_dst,
        Rotation::None,
        Flip::None,
        Crop::no_crop(),
    );
    r_gl.expect("GL-backed convert must succeed for 375x333 Rgba src");
    // CPU-only reference with unpadded system memory.
    let src_cpu =
        crate::load_jpeg(&jpeg, Some(PixelFormat::Rgba), Some(TensorMemory::Mem)).unwrap();
    let mut cpu_proc = ImageProcessor::with_config(ImageProcessorConfig {
        backend: ComputeBackend::Cpu,
        ..Default::default()
    })
    .unwrap();
    let cpu_dst = TensorDyn::image(
        640,
        640,
        PixelFormat::Rgba,
        DType::U8,
        Some(TensorMemory::Mem),
    )
    .unwrap();
    let (r_cpu, _src_cpu, cpu_dst) = convert_img(
        &mut cpu_proc,
        src_cpu,
        cpu_dst,
        Rotation::None,
        Flip::None,
        Crop::no_crop(),
    );
    r_cpu.unwrap();
    compare_images(&gl_dst, &cpu_dst, 0.95, function!());
}
// For several widths whose natural RGBA pitch is not 64-byte aligned, the
// padded-DMA load path must report an explicit, aligned row_stride.
#[test]
#[cfg(target_os = "linux")]
fn test_load_jpeg_rgba_non_aligned_pitch_padded_dma() {
    use edgefirst_tensor::is_dma_available;
    if !is_dma_available() {
        eprintln!(
            "SKIPPED: test_load_jpeg_rgba_non_aligned_pitch_padded_dma — DMA not available"
        );
        return;
    }
    for &w in &[500u32, 612, 428] {
        let jpeg = make_rgb_jpeg(w, 333);
        let loaded = crate::load_jpeg(&jpeg, Some(PixelFormat::Rgba), None).unwrap();
        // Natural pitch is width * 4 bytes (RGBA, one byte per channel).
        let natural = (w as usize) * 4;
        let aligned = crate::align_pitch_bytes_to_gpu_alignment(natural).unwrap();
        assert!(
            aligned > natural,
            "test sanity: width {w} should be unaligned"
        );
        let stride = loaded
            .row_stride()
            .expect("padded DMA path must set an explicit row_stride — regression if None");
        assert_eq!(
            stride, aligned,
            "width {w}: expected padded stride {aligned}, got {stride} \
             (regression: pitch-padding branch skipped?)"
        );
        // effective_row_stride must agree with the stored stride.
        let eff = loaded.effective_row_stride().unwrap();
        assert_eq!(
            eff, aligned,
            "effective_row_stride must match stored stride"
        );
        assert_eq!(loaded.width(), Some(w as usize));
        assert_eq!(loaded.height(), Some(333));
    }
}
// padded_dma_pitch_for must pad only when the tensor will actually live in
// DMA memory: never for Mem/Shm, always for explicit Dma, and for None only
// when DMA is available at runtime.
#[test]
#[cfg(target_os = "linux")]
fn test_padded_dma_pitch_for_respects_memory_choice() {
    use edgefirst_tensor::{is_dma_available, TensorMemory};
    // 500 * 4 = 2000 bytes, not a multiple of 64 — padding would apply.
    let unaligned_w = 500;
    assert_eq!(
        crate::padded_dma_pitch_for(PixelFormat::Rgba, unaligned_w, &Some(TensorMemory::Mem),),
        None,
        "Mem must never trigger DMA padding"
    );
    assert_eq!(
        crate::padded_dma_pitch_for(PixelFormat::Rgba, unaligned_w, &Some(TensorMemory::Shm),),
        None,
        "Shm must never trigger DMA padding"
    );
    // 2000 rounded up to the 64-byte alignment is 2048.
    assert_eq!(
        crate::padded_dma_pitch_for(PixelFormat::Rgba, unaligned_w, &Some(TensorMemory::Dma),),
        Some(2048),
        "explicit Dma must pad regardless of runtime DMA availability"
    );
    // With no explicit memory choice, the result depends on runtime support.
    let none_result = crate::padded_dma_pitch_for(PixelFormat::Rgba, unaligned_w, &None);
    if is_dma_available() {
        assert_eq!(
            none_result,
            Some(2048),
            "memory=None + DMA available → pad (will route through DMA)"
        );
    } else {
        assert_eq!(
            none_result, None,
            "memory=None + DMA unavailable → must NOT pad (would force \
             image_with_stride into a DMA-only allocation that fails). \
             Regression: padded_dma_pitch_for ignored is_dma_available()."
        );
    }
}
/// Builds a synthetic 8-bit greyscale PNG of the given size.
///
/// Pixel at (x, y) is `(x + y) & 0xFF`, which the grey-PNG loader tests below
/// assert against after decoding.
fn make_grey_png(width: u32, height: u32) -> Vec<u8> {
    // Capacity computed in usize so `width * height` cannot overflow u32
    // (a debug-build panic for large dimensions).
    let mut bytes = Vec::with_capacity(width as usize * height as usize);
    for y in 0..height {
        for x in 0..width {
            // wrapping_add keeps the pattern well-defined for any size;
            // identical to `x + y` for all non-overflowing inputs.
            bytes.push((x.wrapping_add(y) & 0xFF) as u8);
        }
    }
    let img = image::GrayImage::from_vec(width, height, bytes).unwrap();
    let mut buf = Vec::new();
    img.write_to(&mut std::io::Cursor::new(&mut buf), image::ImageFormat::Png)
        .unwrap();
    buf
}
// Loads a grey PNG whose width (612) is not 64-byte aligned into DMA memory
// and verifies every pixel through the (possibly padded) row stride.
#[test]
#[cfg(target_os = "linux")]
fn test_load_png_grey_misaligned_width_dma() {
    use edgefirst_tensor::is_dma_available;
    if !is_dma_available() {
        eprintln!("SKIPPED: test_load_png_grey_misaligned_width_dma — DMA not available");
        return;
    }
    let png = make_grey_png(612, 388);
    let loaded = crate::load_png(&png, Some(PixelFormat::Grey), None).unwrap();
    assert_eq!(loaded.width(), Some(612));
    assert_eq!(loaded.height(), Some(388));
    assert_eq!(loaded.format(), Some(PixelFormat::Grey));
    let map = loaded.as_u8().unwrap().map().unwrap();
    // Fall back to the natural pitch when no explicit stride was stored.
    let stride = loaded.row_stride().unwrap_or(612);
    assert!(stride >= 612);
    let bytes: &[u8] = &map;
    // Index rows by stride (not width) so padding bytes are skipped.
    for y in 0..388usize {
        for x in 0..612usize {
            let expected = ((x + y) & 0xFF) as u8;
            let got = bytes[y * stride + x];
            assert_eq!(
                got, expected,
                "grey png mismatch at ({x},{y}): got {got} expected {expected}"
            );
        }
    }
}
// Loads a synthetic grey PNG into plain heap memory and verifies every pixel
// of the tightly-packed buffer.
#[test]
fn test_load_png_grey_mem() {
    use edgefirst_tensor::TensorMemory;
    const W: usize = 612;
    const H: usize = 100;
    let png = make_grey_png(W as u32, H as u32);
    let loaded =
        crate::load_png(&png, Some(PixelFormat::Grey), Some(TensorMemory::Mem)).unwrap();
    assert_eq!(loaded.width(), Some(W));
    assert_eq!(loaded.height(), Some(H));
    assert_eq!(loaded.format(), Some(PixelFormat::Grey));
    let map = loaded.as_u8().unwrap().map().unwrap();
    let bytes: &[u8] = &map;
    assert_eq!(bytes.len(), W * H);
    // Mem tensors have no row padding, so a flat index recovers (x, y).
    for (idx, &px) in bytes.iter().enumerate() {
        let (x, y) = (idx % W, idx / W);
        assert_eq!(px, ((x + y) & 0xFF) as u8);
    }
}
// Grey PNG expanded to RGB on load: every channel must replicate the source
// grey value. Spot-checks the two corners and one interior pixel.
#[test]
fn test_load_png_grey_to_rgb_mem() {
    use edgefirst_tensor::TensorMemory;
    let png = make_grey_png(620, 240);
    let loaded =
        crate::load_png(&png, Some(PixelFormat::Rgb), Some(TensorMemory::Mem)).unwrap();
    assert_eq!(loaded.width(), Some(620));
    assert_eq!(loaded.height(), Some(240));
    assert_eq!(loaded.format(), Some(PixelFormat::Rgb));
    let map = loaded.as_u8().unwrap().map().unwrap();
    let bytes: &[u8] = &map;
    for (x, y) in [(0usize, 0usize), (100, 50), (619, 239)] {
        let expected = ((x + y) & 0xFF) as u8;
        // Packed RGB: three bytes per pixel, no row padding in Mem.
        let off = (y * 620 + x) * 3;
        let (r, g, b) = (bytes[off], bytes[off + 1], bytes[off + 2]);
        assert_eq!(r, expected, "R@{x},{y}");
        assert_eq!(g, expected, "G@{x},{y}");
        assert_eq!(b, expected, "B@{x},{y}");
    }
}
// G2D downscale (1280x720 -> 640x360) must match the CPU backend on the same
// DMA-backed source image.
#[test]
#[cfg(target_os = "linux")]
fn test_g2d_resize() {
    if !is_g2d_available() {
        eprintln!("SKIPPED: test_g2d_resize - G2D library (libg2d.so.2) not available");
        return;
    }
    if !is_dma_available() {
        eprintln!(
            "SKIPPED: test_g2d_resize - DMA memory allocation not available (permission denied or no DMA-BUF support)"
        );
        return;
    }
    let dst_width = 640;
    let dst_height = 360;
    let file = include_bytes!(concat!(
        env!("CARGO_MANIFEST_DIR"),
        "/../../testdata/zidane.jpg"
    ))
    .to_vec();
    // G2D requires DMA-backed buffers for source and destination.
    let src =
        crate::load_image(&file, Some(PixelFormat::Rgba), Some(TensorMemory::Dma)).unwrap();
    let g2d_dst = TensorDyn::image(
        dst_width,
        dst_height,
        PixelFormat::Rgba,
        DType::U8,
        Some(TensorMemory::Dma),
    )
    .unwrap();
    let mut g2d_converter = G2DProcessor::new().unwrap();
    // convert_img consumes and returns the tensors, so rebind on each pass.
    let (result, src, g2d_dst) = convert_img(
        &mut g2d_converter,
        src,
        g2d_dst,
        Rotation::None,
        Flip::None,
        Crop::no_crop(),
    );
    result.unwrap();
    // CPU reference pass over the same (returned) source tensor.
    let cpu_dst =
        TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
    let mut cpu_converter = CPUProcessor::new();
    let (result, _src, cpu_dst) = convert_img(
        &mut cpu_converter,
        src,
        cpu_dst,
        Rotation::None,
        Flip::None,
        Crop::no_crop(),
    );
    result.unwrap();
    compare_images(&g2d_dst, &cpu_dst, 0.98, function!());
}
// GL downscale must match the CPU backend; runs the GL pass five times with a
// fresh destination each iteration to catch state leakage in the processor.
// Also reused as the worker body of test_opengl_10_threads.
#[test]
#[cfg(target_os = "linux")]
#[cfg(feature = "opengl")]
fn test_opengl_resize() {
    if !is_opengl_available() {
        eprintln!("SKIPPED: {} - OpenGL not available", function!());
        return;
    }
    let dst_width = 640;
    let dst_height = 360;
    let file = include_bytes!(concat!(
        env!("CARGO_MANIFEST_DIR"),
        "/../../testdata/zidane.jpg"
    ))
    .to_vec();
    let src = crate::load_image(&file, Some(PixelFormat::Rgba), None).unwrap();
    // CPU reference result, computed once.
    let cpu_dst =
        TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
    let mut cpu_converter = CPUProcessor::new();
    let (result, src, cpu_dst) = convert_img(
        &mut cpu_converter,
        src,
        cpu_dst,
        Rotation::None,
        Flip::None,
        Crop::no_crop(),
    );
    result.unwrap();
    let mut src = src;
    let mut gl_converter = GLProcessorThreaded::new(None).unwrap();
    for _ in 0..5 {
        let gl_dst =
            TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None)
                .unwrap();
        // convert_img consumes src; thread it back for the next iteration.
        let (result, src_back, gl_dst) = convert_img(
            &mut gl_converter,
            src,
            gl_dst,
            Rotation::None,
            Flip::None,
            Crop::no_crop(),
        );
        result.unwrap();
        src = src_back;
        compare_images(&gl_dst, &cpu_dst, 0.98, function!());
    }
}
// Runs the GL resize test concurrently from ten named threads to shake out
// races in the threaded GL processor; any worker panic is re-raised on join.
#[test]
#[cfg(target_os = "linux")]
#[cfg(feature = "opengl")]
fn test_opengl_10_threads() {
    if !is_opengl_available() {
        eprintln!("SKIPPED: {} - OpenGL not available", function!());
        return;
    }
    let mut handles = Vec::with_capacity(10);
    for i in 0..10 {
        let handle = std::thread::Builder::new()
            .name(format!("Thread {i}"))
            .spawn(test_opengl_resize)
            .unwrap();
        handles.push(handle);
    }
    for handle in handles {
        if let Err(payload) = handle.join() {
            // Propagate the worker's panic so the test fails with its message.
            std::panic::resume_unwind(payload)
        }
    }
}
// Single-channel (Grey) resize on the GL backend must match the CPU backend.
#[test]
#[cfg(target_os = "linux")]
#[cfg(feature = "opengl")]
fn test_opengl_grey() {
    if !is_opengl_available() {
        eprintln!("SKIPPED: {} - OpenGL not available", function!());
        return;
    }
    let img = crate::load_image(
        include_bytes!(concat!(
            env!("CARGO_MANIFEST_DIR"),
            "/../../testdata/grey.jpg"
        )),
        Some(PixelFormat::Grey),
        None,
    )
    .unwrap();
    let gl_dst = TensorDyn::image(640, 640, PixelFormat::Grey, DType::U8, None).unwrap();
    let cpu_dst = TensorDyn::image(640, 640, PixelFormat::Grey, DType::U8, None).unwrap();
    // CPU reference pass; convert_img returns the source for reuse below.
    let mut converter = CPUProcessor::new();
    let (result, img, cpu_dst) = convert_img(
        &mut converter,
        img,
        cpu_dst,
        Rotation::None,
        Flip::None,
        Crop::no_crop(),
    );
    result.unwrap();
    let mut gl = GLProcessorThreaded::new(None).unwrap();
    let (result, _img, gl_dst) = convert_img(
        &mut gl,
        img,
        gl_dst,
        Rotation::None,
        Flip::None,
        Crop::no_crop(),
    );
    result.unwrap();
    compare_images(&gl_dst, &cpu_dst, 0.98, function!());
}
// Source-rect cropping (top half of the 1280x720 frame) scaled to 640x640
// must agree between the G2D and CPU backends.
#[test]
#[cfg(target_os = "linux")]
fn test_g2d_src_crop() {
    if !is_g2d_available() {
        eprintln!("SKIPPED: test_g2d_src_crop - G2D library (libg2d.so.2) not available");
        return;
    }
    if !is_dma_available() {
        eprintln!(
            "SKIPPED: test_g2d_src_crop - DMA memory allocation not available (permission denied or no DMA-BUF support)"
        );
        return;
    }
    let dst_width = 640;
    let dst_height = 640;
    let file = include_bytes!(concat!(
        env!("CARGO_MANIFEST_DIR"),
        "/../../testdata/zidane.jpg"
    ))
    .to_vec();
    let src = crate::load_image(&file, Some(PixelFormat::Rgba), None).unwrap();
    let cpu_dst =
        TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
    let mut cpu_converter = CPUProcessor::new();
    // Crop only the source: the 640x360 top-left region of the frame.
    let crop = Crop {
        src_rect: Some(Rect {
            left: 0,
            top: 0,
            width: 640,
            height: 360,
        }),
        dst_rect: None,
        dst_color: None,
    };
    let (result, src, cpu_dst) = convert_img(
        &mut cpu_converter,
        src,
        cpu_dst,
        Rotation::None,
        Flip::None,
        crop,
    );
    result.unwrap();
    let g2d_dst =
        TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
    let mut g2d_converter = G2DProcessor::new().unwrap();
    let (result, _src, g2d_dst) = convert_img(
        &mut g2d_converter,
        src,
        g2d_dst,
        Rotation::None,
        Flip::None,
        crop,
    );
    result.unwrap();
    compare_images(&g2d_dst, &cpu_dst, 0.98, function!());
}
// Destination-rect cropping (letterboxed 512x288 region at (100,100)) must
// agree between the G2D and CPU backends.
#[test]
#[cfg(target_os = "linux")]
fn test_g2d_dst_crop() {
    if !is_g2d_available() {
        eprintln!("SKIPPED: test_g2d_dst_crop - G2D library (libg2d.so.2) not available");
        return;
    }
    if !is_dma_available() {
        eprintln!(
            "SKIPPED: test_g2d_dst_crop - DMA memory allocation not available (permission denied or no DMA-BUF support)"
        );
        return;
    }
    let dst_width = 640;
    let dst_height = 640;
    let file = include_bytes!(concat!(
        env!("CARGO_MANIFEST_DIR"),
        "/../../testdata/zidane.jpg"
    ))
    .to_vec();
    let src = crate::load_image(&file, Some(PixelFormat::Rgba), None).unwrap();
    let cpu_dst =
        TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
    let mut cpu_converter = CPUProcessor::new();
    // Crop only the destination: render into a sub-rect of the 640x640 dst.
    let crop = Crop {
        src_rect: None,
        dst_rect: Some(Rect::new(100, 100, 512, 288)),
        dst_color: None,
    };
    let (result, src, cpu_dst) = convert_img(
        &mut cpu_converter,
        src,
        cpu_dst,
        Rotation::None,
        Flip::None,
        crop,
    );
    result.unwrap();
    let g2d_dst =
        TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
    let mut g2d_converter = G2DProcessor::new().unwrap();
    let (result, _src, g2d_dst) = convert_img(
        &mut g2d_converter,
        src,
        g2d_dst,
        Rotation::None,
        Flip::None,
        crop,
    );
    result.unwrap();
    compare_images(&g2d_dst, &cpu_dst, 0.98, function!());
}
// Compares G2D against the CPU backend for every rotation/flip combination
// with both a source and a destination crop rectangle applied.
#[test]
#[cfg(target_os = "linux")]
fn test_g2d_all_rgba() {
    if !is_g2d_available() {
        eprintln!("SKIPPED: test_g2d_all_rgba - G2D library (libg2d.so.2) not available");
        return;
    }
    if !is_dma_available() {
        eprintln!(
            "SKIPPED: test_g2d_all_rgba - DMA memory allocation not available (permission denied or no DMA-BUF support)"
        );
        return;
    }
    let dst_width = 640;
    let dst_height = 640;
    let file = include_bytes!(concat!(
        env!("CARGO_MANIFEST_DIR"),
        "/../../testdata/zidane.jpg"
    ))
    .to_vec();
    let src = crate::load_image(&file, Some(PixelFormat::Rgba), None).unwrap();
    let src_dyn = src;
    let mut cpu_dst =
        TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
    let mut cpu_converter = CPUProcessor::new();
    let mut g2d_dst =
        TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
    let mut g2d_converter = G2DProcessor::new().unwrap();
    let crop = Crop {
        src_rect: Some(Rect::new(50, 120, 1024, 576)),
        dst_rect: Some(Rect::new(100, 100, 512, 288)),
        dst_color: None,
    };
    for rot in [
        Rotation::None,
        Rotation::Clockwise90,
        Rotation::Rotate180,
        Rotation::CounterClockwise90,
    ] {
        for flip in [Flip::None, Flip::Horizontal, Flip::Vertical] {
            // Reset both destinations to a uniform background before EACH
            // combination (previously only once per rotation), so pixels
            // outside dst_rect are compared from a known state.
            cpu_dst
                .as_u8()
                .unwrap()
                .map()
                .unwrap()
                .as_mut_slice()
                .fill(114);
            g2d_dst
                .as_u8()
                .unwrap()
                .map()
                .unwrap()
                .as_mut_slice()
                .fill(114);
            let mut cpu_dst_dyn = cpu_dst;
            // BUG FIX: the loop variables were previously ignored — both
            // converts hard-coded Rotation::None / Flip::None, so the
            // rotation/flip matrix was never actually exercised.
            cpu_converter
                .convert(&src_dyn, &mut cpu_dst_dyn, rot, flip, crop)
                .unwrap();
            cpu_dst = {
                let mut __t = cpu_dst_dyn.into_u8().unwrap();
                __t.set_format(PixelFormat::Rgba).unwrap();
                TensorDyn::from(__t)
            };
            let mut g2d_dst_dyn = g2d_dst;
            g2d_converter
                .convert(&src_dyn, &mut g2d_dst_dyn, rot, flip, crop)
                .unwrap();
            g2d_dst = {
                let mut __t = g2d_dst_dyn.into_u8().unwrap();
                __t.set_format(PixelFormat::Rgba).unwrap();
                TensorDyn::from(__t)
            };
            compare_images(
                &g2d_dst,
                &cpu_dst,
                0.98,
                &format!("{} {:?} {:?}", function!(), rot, flip),
            );
        }
    }
}
// Source-rect cropping (bottom-right 960x540 region) scaled to 640x360 must
// agree between the GL and CPU backends.
#[test]
#[cfg(target_os = "linux")]
#[cfg(feature = "opengl")]
fn test_opengl_src_crop() {
    if !is_opengl_available() {
        eprintln!("SKIPPED: {} - OpenGL not available", function!());
        return;
    }
    let dst_width = 640;
    let dst_height = 360;
    let file = include_bytes!(concat!(
        env!("CARGO_MANIFEST_DIR"),
        "/../../testdata/zidane.jpg"
    ))
    .to_vec();
    let src = crate::load_image(&file, Some(PixelFormat::Rgba), None).unwrap();
    // Crop only the source: everything right/below of (320, 180).
    let crop = Crop {
        src_rect: Some(Rect {
            left: 320,
            top: 180,
            width: 1280 - 320,
            height: 720 - 180,
        }),
        dst_rect: None,
        dst_color: None,
    };
    let cpu_dst =
        TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
    let mut cpu_converter = CPUProcessor::new();
    let (result, src, cpu_dst) = convert_img(
        &mut cpu_converter,
        src,
        cpu_dst,
        Rotation::None,
        Flip::None,
        crop,
    );
    result.unwrap();
    let gl_dst =
        TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
    let mut gl_converter = GLProcessorThreaded::new(None).unwrap();
    let (result, _src, gl_dst) = convert_img(
        &mut gl_converter,
        src,
        gl_dst,
        Rotation::None,
        Flip::None,
        crop,
    );
    result.unwrap();
    compare_images(&gl_dst, &cpu_dst, 0.98, function!());
}
// Destination-rect cropping (512x288 region at (100,100) of a 640x640 dst)
// must agree between the GL and CPU backends.
#[test]
#[cfg(target_os = "linux")]
#[cfg(feature = "opengl")]
fn test_opengl_dst_crop() {
    if !is_opengl_available() {
        eprintln!("SKIPPED: {} - OpenGL not available", function!());
        return;
    }
    let dst_width = 640;
    let dst_height = 640;
    let file = include_bytes!(concat!(
        env!("CARGO_MANIFEST_DIR"),
        "/../../testdata/zidane.jpg"
    ))
    .to_vec();
    let src = crate::load_image(&file, Some(PixelFormat::Rgba), None).unwrap();
    let cpu_dst =
        TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
    let mut cpu_converter = CPUProcessor::new();
    // Crop only the destination; source is used in full.
    let crop = Crop {
        src_rect: None,
        dst_rect: Some(Rect::new(100, 100, 512, 288)),
        dst_color: None,
    };
    let (result, src, cpu_dst) = convert_img(
        &mut cpu_converter,
        src,
        cpu_dst,
        Rotation::None,
        Flip::None,
        crop,
    );
    result.unwrap();
    let gl_dst =
        TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
    let mut gl_converter = GLProcessorThreaded::new(None).unwrap();
    let (result, _src, gl_dst) = convert_img(
        &mut gl_converter,
        src,
        gl_dst,
        Rotation::None,
        Flip::None,
        crop,
    );
    result.unwrap();
    compare_images(&gl_dst, &cpu_dst, 0.98, function!());
}
// Compares GL against the CPU backend for every memory kind, rotation, and
// flip combination, with both a source and a destination crop rectangle.
#[test]
#[cfg(target_os = "linux")]
#[cfg(feature = "opengl")]
fn test_opengl_all_rgba() {
    if !is_opengl_available() {
        eprintln!("SKIPPED: {} - OpenGL not available", function!());
        return;
    }
    let dst_width = 640;
    let dst_height = 640;
    let file = include_bytes!(concat!(
        env!("CARGO_MANIFEST_DIR"),
        "/../../testdata/zidane.jpg"
    ))
    .to_vec();
    let mut cpu_converter = CPUProcessor::new();
    let mut gl_converter = GLProcessorThreaded::new(None).unwrap();
    // DMA is only tested when the platform actually supports it.
    let mut mem = vec![None, Some(TensorMemory::Mem), Some(TensorMemory::Shm)];
    if is_dma_available() {
        mem.push(Some(TensorMemory::Dma));
    }
    let crop = Crop {
        src_rect: Some(Rect::new(50, 120, 1024, 576)),
        dst_rect: Some(Rect::new(100, 100, 512, 288)),
        dst_color: None,
    };
    for m in mem {
        let src = crate::load_image(&file, Some(PixelFormat::Rgba), m).unwrap();
        let src_dyn = src;
        for rot in [
            Rotation::None,
            Rotation::Clockwise90,
            Rotation::Rotate180,
            Rotation::CounterClockwise90,
        ] {
            for flip in [Flip::None, Flip::Horizontal, Flip::Vertical] {
                let cpu_dst =
                    TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, m)
                        .unwrap();
                let gl_dst =
                    TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, m)
                        .unwrap();
                // Uniform background so pixels outside dst_rect compare equal.
                cpu_dst
                    .as_u8()
                    .unwrap()
                    .map()
                    .unwrap()
                    .as_mut_slice()
                    .fill(114);
                gl_dst
                    .as_u8()
                    .unwrap()
                    .map()
                    .unwrap()
                    .as_mut_slice()
                    .fill(114);
                let mut cpu_dst_dyn = cpu_dst;
                // BUG FIX: the loop variables were previously ignored — both
                // converts hard-coded Rotation::None / Flip::None, so the
                // rotation/flip matrix was never actually exercised.
                cpu_converter
                    .convert(&src_dyn, &mut cpu_dst_dyn, rot, flip, crop)
                    .unwrap();
                let cpu_dst = {
                    let mut __t = cpu_dst_dyn.into_u8().unwrap();
                    __t.set_format(PixelFormat::Rgba).unwrap();
                    TensorDyn::from(__t)
                };
                let mut gl_dst_dyn = gl_dst;
                gl_converter
                    .convert(&src_dyn, &mut gl_dst_dyn, rot, flip, crop)
                    .map_err(|e| {
                        log::error!("error mem {m:?} rot {rot:?} error: {e:?}");
                        e
                    })
                    .unwrap();
                let gl_dst = {
                    let mut __t = gl_dst_dyn.into_u8().unwrap();
                    __t.set_format(PixelFormat::Rgba).unwrap();
                    TensorDyn::from(__t)
                };
                compare_images(
                    &gl_dst,
                    &cpu_dst,
                    0.98,
                    &format!("{} {:?} {:?}", function!(), rot, flip),
                );
            }
        }
    }
}
// Exercise every non-identity rotation through the CPU round-trip helper.
#[test]
#[cfg(target_os = "linux")]
fn test_cpu_rotate() {
    [
        Rotation::Clockwise90,
        Rotation::Rotate180,
        Rotation::CounterClockwise90,
    ]
    .into_iter()
    .for_each(test_cpu_rotate_);
}
// Applies `rot` four times, ping-ponging between two buffers. Four 90° turns
// (or four 180° turns) are a full 360°, so the final image must match the
// originally loaded source.
#[cfg(target_os = "linux")]
fn test_cpu_rotate_(rot: Rotation) {
    let file = include_bytes!(concat!(
        env!("CARGO_MANIFEST_DIR"),
        "/../../testdata/zidane.jpg"
    ))
    .to_vec();
    // Keep a pristine copy for the final comparison.
    let unchanged_src = crate::load_image(&file, Some(PixelFormat::Rgba), None).unwrap();
    let src = crate::load_image(&file, Some(PixelFormat::Rgba), None).unwrap();
    // 90° rotations swap the destination's width and height.
    let (dst_width, dst_height) = match rot {
        Rotation::None | Rotation::Rotate180 => (src.width().unwrap(), src.height().unwrap()),
        Rotation::Clockwise90 | Rotation::CounterClockwise90 => {
            (src.height().unwrap(), src.width().unwrap())
        }
    };
    let cpu_dst =
        TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
    let mut cpu_converter = CPUProcessor::new();
    // Pass 1: src -> cpu_dst.
    let (result, src, cpu_dst) = convert_img(
        &mut cpu_converter,
        src,
        cpu_dst,
        rot,
        Flip::None,
        Crop::no_crop(),
    );
    result.unwrap();
    // Pass 2: cpu_dst -> src (buffers swap roles each pass).
    let (result, cpu_dst, src) = convert_img(
        &mut cpu_converter,
        cpu_dst,
        src,
        rot,
        Flip::None,
        Crop::no_crop(),
    );
    result.unwrap();
    // Pass 3.
    let (result, src, cpu_dst) = convert_img(
        &mut cpu_converter,
        src,
        cpu_dst,
        rot,
        Flip::None,
        Crop::no_crop(),
    );
    result.unwrap();
    // Pass 4: after this, `src` holds the fully round-tripped image.
    let (result, _cpu_dst, src) = convert_img(
        &mut cpu_converter,
        cpu_dst,
        src,
        rot,
        Flip::None,
        Crop::no_crop(),
    );
    result.unwrap();
    compare_images(&src, &unchanged_src, 0.98, function!());
}
// Runs the GL rotation test for every memory kind (DMA only when supported by
// the platform) crossed with every non-identity rotation.
#[test]
#[cfg(target_os = "linux")]
#[cfg(feature = "opengl")]
fn test_opengl_rotate() {
    if !is_opengl_available() {
        eprintln!("SKIPPED: {} - OpenGL not available", function!());
        return;
    }
    let size = (1280, 720);
    let rotations = [
        Rotation::Clockwise90,
        Rotation::Rotate180,
        Rotation::CounterClockwise90,
    ];
    let mut mem = vec![None, Some(TensorMemory::Shm), Some(TensorMemory::Mem)];
    if is_dma_available() {
        mem.push(Some(TensorMemory::Dma));
    }
    for m in mem {
        for rot in rotations {
            test_opengl_rotate_(size, rot, m);
        }
    }
}
// Rotates the test image on the GL backend five times (fresh destination in
// the requested memory each pass) and compares every result against a single
// CPU reference rotation.
#[cfg(target_os = "linux")]
#[cfg(feature = "opengl")]
fn test_opengl_rotate_(
    size: (usize, usize),
    rot: Rotation,
    tensor_memory: Option<TensorMemory>,
) {
    // 90° rotations swap the destination's width and height.
    let (dst_width, dst_height) = match rot {
        Rotation::None | Rotation::Rotate180 => size,
        Rotation::Clockwise90 | Rotation::CounterClockwise90 => (size.1, size.0),
    };
    let file = include_bytes!(concat!(
        env!("CARGO_MANIFEST_DIR"),
        "/../../testdata/zidane.jpg"
    ))
    .to_vec();
    let src = crate::load_image(&file, Some(PixelFormat::Rgba), tensor_memory).unwrap();
    // CPU reference result, computed once.
    let cpu_dst =
        TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
    let mut cpu_converter = CPUProcessor::new();
    let (result, mut src, cpu_dst) = convert_img(
        &mut cpu_converter,
        src,
        cpu_dst,
        rot,
        Flip::None,
        Crop::no_crop(),
    );
    result.unwrap();
    let mut gl_converter = GLProcessorThreaded::new(None).unwrap();
    for _ in 0..5 {
        let gl_dst = TensorDyn::image(
            dst_width,
            dst_height,
            PixelFormat::Rgba,
            DType::U8,
            tensor_memory,
        )
        .unwrap();
        // convert_img consumes src; thread it back for the next iteration.
        let (result, src_back, gl_dst) = convert_img(
            &mut gl_converter,
            src,
            gl_dst,
            rot,
            Flip::None,
            Crop::no_crop(),
        );
        result.unwrap();
        src = src_back;
        compare_images(&gl_dst, &cpu_dst, 0.98, function!());
    }
}
// Compares G2D rotation output against the CPU reference for each
// non-identity rotation; skips when G2D or DMA is unavailable.
#[test]
#[cfg(target_os = "linux")]
fn test_g2d_rotate() {
    if !is_g2d_available() {
        eprintln!("SKIPPED: test_g2d_rotate - G2D library (libg2d.so.2) not available");
        return;
    }
    if !is_dma_available() {
        eprintln!(
            "SKIPPED: test_g2d_rotate - DMA memory allocation not available (permission denied or no DMA-BUF support)"
        );
        return;
    }
    let size = (1280, 720);
    [
        Rotation::Clockwise90,
        Rotation::Rotate180,
        Rotation::CounterClockwise90,
    ]
    .into_iter()
    .for_each(|rot| test_g2d_rotate_(size, rot));
}
// Rotates the DMA-backed test image on G2D and compares against a CPU
// reference rotation of the same source.
#[cfg(target_os = "linux")]
fn test_g2d_rotate_(size: (usize, usize), rot: Rotation) {
    // 90° rotations swap the destination's width and height.
    let (dst_width, dst_height) = match rot {
        Rotation::None | Rotation::Rotate180 => size,
        Rotation::Clockwise90 | Rotation::CounterClockwise90 => (size.1, size.0),
    };
    let file = include_bytes!(concat!(
        env!("CARGO_MANIFEST_DIR"),
        "/../../testdata/zidane.jpg"
    ))
    .to_vec();
    // G2D requires a DMA-backed source.
    let src =
        crate::load_image(&file, Some(PixelFormat::Rgba), Some(TensorMemory::Dma)).unwrap();
    let cpu_dst =
        TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
    let mut cpu_converter = CPUProcessor::new();
    let (result, src, cpu_dst) = convert_img(
        &mut cpu_converter,
        src,
        cpu_dst,
        rot,
        Flip::None,
        Crop::no_crop(),
    );
    result.unwrap();
    let g2d_dst = TensorDyn::image(
        dst_width,
        dst_height,
        PixelFormat::Rgba,
        DType::U8,
        Some(TensorMemory::Dma),
    )
    .unwrap();
    let mut g2d_converter = G2DProcessor::new().unwrap();
    let (result, _src, g2d_dst) = convert_img(
        &mut g2d_converter,
        src,
        g2d_dst,
        rot,
        Flip::None,
        Crop::no_crop(),
    );
    result.unwrap();
    compare_images(&g2d_dst, &cpu_dst, 0.98, function!());
}
// CPU round-trip: RGBA -> YUYV (with resize) -> RGBA must approximate a
// direct RGBA -> RGBA resize, within the lossy 4:2:2 chroma subsampling.
#[test]
fn test_rgba_to_yuyv_resize_cpu() {
    let src = load_bytes_to_tensor(
        1280,
        720,
        PixelFormat::Rgba,
        None,
        include_bytes!(concat!(
            env!("CARGO_MANIFEST_DIR"),
            "/../../testdata/camera720p.rgba"
        )),
    )
    .unwrap();
    let (dst_width, dst_height) = (640, 360);
    let dst =
        TensorDyn::image(dst_width, dst_height, PixelFormat::Yuyv, DType::U8, None).unwrap();
    let dst_through_yuyv =
        TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
    let dst_direct =
        TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
    let mut cpu_converter = CPUProcessor::new();
    // Pass 1: RGBA source resized into the YUYV buffer.
    let (result, src, dst) = convert_img(
        &mut cpu_converter,
        src,
        dst,
        Rotation::None,
        Flip::None,
        Crop::no_crop(),
    );
    result.unwrap();
    // Pass 2: YUYV back to RGBA (same size).
    let (result, _dst, dst_through_yuyv) = convert_img(
        &mut cpu_converter,
        dst,
        dst_through_yuyv,
        Rotation::None,
        Flip::None,
        Crop::no_crop(),
    );
    result.unwrap();
    // Reference: direct RGBA -> RGBA resize of the original source.
    let (result, _src, dst_direct) = convert_img(
        &mut cpu_converter,
        src,
        dst_direct,
        Rotation::None,
        Flip::None,
        Crop::no_crop(),
    );
    result.unwrap();
    compare_images(&dst_through_yuyv, &dst_direct, 0.98, function!());
}
// GL RGBA -> YUYV resize compared against the CPU path. Currently ignored:
// OpenGL cannot render to a Yuyv texture.
#[test]
#[cfg(target_os = "linux")]
#[cfg(feature = "opengl")]
#[ignore = "opengl doesn't support rendering to PixelFormat::Yuyv texture"]
fn test_rgba_to_yuyv_resize_opengl() {
    if !is_opengl_available() {
        eprintln!("SKIPPED: {} - OpenGL not available", function!());
        return;
    }
    if !is_dma_available() {
        eprintln!(
            "SKIPPED: {} - DMA memory allocation not available (permission denied or no DMA-BUF support)",
            function!()
        );
        return;
    }
    let src = load_bytes_to_tensor(
        1280,
        720,
        PixelFormat::Rgba,
        None,
        include_bytes!(concat!(
            env!("CARGO_MANIFEST_DIR"),
            "/../../testdata/camera720p.rgba"
        )),
    )
    .unwrap();
    let (dst_width, dst_height) = (640, 360);
    let dst = TensorDyn::image(
        dst_width,
        dst_height,
        PixelFormat::Yuyv,
        DType::U8,
        Some(TensorMemory::Dma),
    )
    .unwrap();
    let mut gl_converter = GLProcessorThreaded::new(None).unwrap();
    // NOTE(review): the GL pass uses a white-filled 100x100 dst rect while the
    // CPU pass below uses no_crop — presumably the 0.98 threshold absorbs the
    // difference; confirm intent if this test is ever un-ignored.
    let (result, src, dst) = convert_img(
        &mut gl_converter,
        src,
        dst,
        Rotation::None,
        Flip::None,
        Crop::new()
            .with_dst_rect(Some(Rect::new(100, 100, 100, 100)))
            .with_dst_color(Some([255, 255, 255, 255])),
    );
    result.unwrap();
    // NOTE(review): debug artifact written into the CWD — consider removing
    // or redirecting to a temp dir before enabling this test.
    std::fs::write(
        "rgba_to_yuyv_opengl.yuyv",
        dst.as_u8().unwrap().map().unwrap().as_slice(),
    )
    .unwrap();
    let cpu_dst = TensorDyn::image(
        dst_width,
        dst_height,
        PixelFormat::Yuyv,
        DType::U8,
        Some(TensorMemory::Dma),
    )
    .unwrap();
    let (result, _src, cpu_dst) = convert_img(
        &mut CPUProcessor::new(),
        src,
        cpu_dst,
        Rotation::None,
        Flip::None,
        Crop::no_crop(),
    );
    result.unwrap();
    // Compare in RGB space since YUYV byte-wise comparison is not meaningful.
    compare_images_convert_to_rgb(&dst, &cpu_dst, 0.98, function!());
}
// G2D RGBA -> YUYV conversion with a tiny (2x2) destination rect; both
// destinations are pre-filled with 128 (YUV neutral grey) so the untouched
// region compares equal between backends.
#[test]
#[cfg(target_os = "linux")]
fn test_rgba_to_yuyv_resize_g2d() {
    if !is_g2d_available() {
        eprintln!(
            "SKIPPED: test_rgba_to_yuyv_resize_g2d - G2D library (libg2d.so.2) not available"
        );
        return;
    }
    if !is_dma_available() {
        eprintln!(
            "SKIPPED: test_rgba_to_yuyv_resize_g2d - DMA memory allocation not available (permission denied or no DMA-BUF support)"
        );
        return;
    }
    let src = load_bytes_to_tensor(
        1280,
        720,
        PixelFormat::Rgba,
        Some(TensorMemory::Dma),
        include_bytes!(concat!(
            env!("CARGO_MANIFEST_DIR"),
            "/../../testdata/camera720p.rgba"
        )),
    )
    .unwrap();
    let (dst_width, dst_height) = (1280, 720);
    let cpu_dst = TensorDyn::image(
        dst_width,
        dst_height,
        PixelFormat::Yuyv,
        DType::U8,
        Some(TensorMemory::Dma),
    )
    .unwrap();
    let g2d_dst = TensorDyn::image(
        dst_width,
        dst_height,
        PixelFormat::Yuyv,
        DType::U8,
        Some(TensorMemory::Dma),
    )
    .unwrap();
    let mut g2d_converter = G2DProcessor::new().unwrap();
    // Render into a 2x2 patch only; the rest of the frame stays at the fill.
    let crop = Crop {
        src_rect: None,
        dst_rect: Some(Rect::new(100, 100, 2, 2)),
        dst_color: None,
    };
    g2d_dst
        .as_u8()
        .unwrap()
        .map()
        .unwrap()
        .as_mut_slice()
        .fill(128);
    let (result, src, g2d_dst) = convert_img(
        &mut g2d_converter,
        src,
        g2d_dst,
        Rotation::None,
        Flip::None,
        crop,
    );
    result.unwrap();
    let cpu_dst_img = cpu_dst;
    cpu_dst_img
        .as_u8()
        .unwrap()
        .map()
        .unwrap()
        .as_mut_slice()
        .fill(128);
    let (result, _src, cpu_dst) = convert_img(
        &mut CPUProcessor::new(),
        src,
        cpu_dst_img,
        Rotation::None,
        Flip::None,
        crop,
    );
    result.unwrap();
    // Compare in RGB space since YUYV byte-wise comparison is not meaningful.
    compare_images_convert_to_rgb(&cpu_dst, &g2d_dst, 0.98, function!());
}
// CPU YUYV -> RGBA conversion at identical resolution must match the golden
// pre-converted RGBA capture of the same frame.
#[test]
fn test_yuyv_to_rgba_cpu() {
    let file = include_bytes!(concat!(
        env!("CARGO_MANIFEST_DIR"),
        "/../../testdata/camera720p.yuyv"
    ))
    .to_vec();
    let src = TensorDyn::image(1280, 720, PixelFormat::Yuyv, DType::U8, None).unwrap();
    // Copy the raw YUYV capture into the freshly allocated tensor.
    src.as_u8()
        .unwrap()
        .map()
        .unwrap()
        .as_mut_slice()
        .copy_from_slice(&file);
    let dst = TensorDyn::image(1280, 720, PixelFormat::Rgba, DType::U8, None).unwrap();
    let mut cpu_converter = CPUProcessor::new();
    let (result, _src, dst) = convert_img(
        &mut cpu_converter,
        src,
        dst,
        Rotation::None,
        Flip::None,
        Crop::no_crop(),
    );
    result.unwrap();
    // Golden reference: the same frame stored as RGBA.
    let target_image = TensorDyn::image(1280, 720, PixelFormat::Rgba, DType::U8, None).unwrap();
    target_image
        .as_u8()
        .unwrap()
        .map()
        .unwrap()
        .as_mut_slice()
        .copy_from_slice(include_bytes!(concat!(
            env!("CARGO_MANIFEST_DIR"),
            "/../../testdata/camera720p.rgba"
        )));
    compare_images(&dst, &target_image, 0.98, function!());
}
// CPU YUYV -> RGB (3-channel) conversion must match the golden RGBA capture
// with its alpha channel dropped.
#[test]
fn test_yuyv_to_rgb_cpu() {
    let file = include_bytes!(concat!(
        env!("CARGO_MANIFEST_DIR"),
        "/../../testdata/camera720p.yuyv"
    ))
    .to_vec();
    let src = TensorDyn::image(1280, 720, PixelFormat::Yuyv, DType::U8, None).unwrap();
    src.as_u8()
        .unwrap()
        .map()
        .unwrap()
        .as_mut_slice()
        .copy_from_slice(&file);
    let dst = TensorDyn::image(1280, 720, PixelFormat::Rgb, DType::U8, None).unwrap();
    let mut cpu_converter = CPUProcessor::new();
    let (result, _src, dst) = convert_img(
        &mut cpu_converter,
        src,
        dst,
        Rotation::None,
        Flip::None,
        Crop::no_crop(),
    );
    result.unwrap();
    // Build the RGB reference by stripping alpha from the golden RGBA file:
    // zip 3-byte destination chunks with 4-byte source chunks.
    let target_image = TensorDyn::image(1280, 720, PixelFormat::Rgb, DType::U8, None).unwrap();
    target_image
        .as_u8()
        .unwrap()
        .map()
        .unwrap()
        .as_mut_slice()
        .as_chunks_mut::<3>()
        .0
        .iter_mut()
        .zip(
            include_bytes!(concat!(
                env!("CARGO_MANIFEST_DIR"),
                "/../../testdata/camera720p.rgba"
            ))
            .as_chunks::<4>()
            .0,
        )
        .for_each(|(dst, src)| *dst = [src[0], src[1], src[2]]);
    compare_images(&dst, &target_image, 0.98, function!());
}
// G2D YUYV -> RGBA conversion must match the golden pre-converted RGBA
// capture of the same frame.
#[test]
#[cfg(target_os = "linux")]
fn test_yuyv_to_rgba_g2d() {
    if !is_g2d_available() {
        eprintln!("SKIPPED: test_yuyv_to_rgba_g2d - G2D library (libg2d.so.2) not available");
        return;
    }
    if !is_dma_available() {
        eprintln!(
            "SKIPPED: test_yuyv_to_rgba_g2d - DMA memory allocation not available (permission denied or no DMA-BUF support)"
        );
        return;
    }
    let src = load_bytes_to_tensor(
        1280,
        720,
        PixelFormat::Yuyv,
        None,
        include_bytes!(concat!(
            env!("CARGO_MANIFEST_DIR"),
            "/../../testdata/camera720p.yuyv"
        )),
    )
    .unwrap();
    // G2D output must be DMA-backed.
    let dst = TensorDyn::image(
        1280,
        720,
        PixelFormat::Rgba,
        DType::U8,
        Some(TensorMemory::Dma),
    )
    .unwrap();
    let mut g2d_converter = G2DProcessor::new().unwrap();
    let (result, _src, dst) = convert_img(
        &mut g2d_converter,
        src,
        dst,
        Rotation::None,
        Flip::None,
        Crop::no_crop(),
    );
    result.unwrap();
    // Golden reference: the same frame stored as RGBA.
    let target_image = TensorDyn::image(1280, 720, PixelFormat::Rgba, DType::U8, None).unwrap();
    target_image
        .as_u8()
        .unwrap()
        .map()
        .unwrap()
        .as_mut_slice()
        .copy_from_slice(include_bytes!(concat!(
            env!("CARGO_MANIFEST_DIR"),
            "/../../testdata/camera720p.rgba"
        )));
    compare_images(&dst, &target_image, 0.98, function!());
}
// GL YUYV -> RGBA conversion (DMA-backed on both ends) must match the golden
// pre-converted RGBA capture of the same frame.
#[test]
#[cfg(target_os = "linux")]
#[cfg(feature = "opengl")]
fn test_yuyv_to_rgba_opengl() {
    if !is_opengl_available() {
        eprintln!("SKIPPED: {} - OpenGL not available", function!());
        return;
    }
    if !is_dma_available() {
        eprintln!(
            "SKIPPED: {} - DMA memory allocation not available (permission denied or no DMA-BUF support)",
            function!()
        );
        return;
    }
    let src = load_bytes_to_tensor(
        1280,
        720,
        PixelFormat::Yuyv,
        Some(TensorMemory::Dma),
        include_bytes!(concat!(
            env!("CARGO_MANIFEST_DIR"),
            "/../../testdata/camera720p.yuyv"
        )),
    )
    .unwrap();
    let dst = TensorDyn::image(
        1280,
        720,
        PixelFormat::Rgba,
        DType::U8,
        Some(TensorMemory::Dma),
    )
    .unwrap();
    let mut gl_converter = GLProcessorThreaded::new(None).unwrap();
    let (result, _src, dst) = convert_img(
        &mut gl_converter,
        src,
        dst,
        Rotation::None,
        Flip::None,
        Crop::no_crop(),
    );
    result.unwrap();
    // Golden reference: the same frame stored as RGBA.
    let target_image = TensorDyn::image(1280, 720, PixelFormat::Rgba, DType::U8, None).unwrap();
    target_image
        .as_u8()
        .unwrap()
        .map()
        .unwrap()
        .as_mut_slice()
        .copy_from_slice(include_bytes!(concat!(
            env!("CARGO_MANIFEST_DIR"),
            "/../../testdata/camera720p.rgba"
        )));
    compare_images(&dst, &target_image, 0.98, function!());
}
// YUYV -> RGB (3-channel) conversion on G2D must agree with the CPU backend
// over the same source frame.
#[test]
#[cfg(target_os = "linux")]
fn test_yuyv_to_rgb_g2d() {
    if !is_g2d_available() {
        eprintln!("SKIPPED: test_yuyv_to_rgb_g2d - G2D library (libg2d.so.2) not available");
        return;
    }
    if !is_dma_available() {
        eprintln!(
            "SKIPPED: test_yuyv_to_rgb_g2d - DMA memory allocation not available (permission denied or no DMA-BUF support)"
        );
        return;
    }
    let src = load_bytes_to_tensor(
        1280,
        720,
        PixelFormat::Yuyv,
        None,
        include_bytes!(concat!(
            env!("CARGO_MANIFEST_DIR"),
            "/../../testdata/camera720p.yuyv"
        )),
    )
    .unwrap();
    // G2D destination must be DMA-backed.
    let g2d_dst = TensorDyn::image(
        1280,
        720,
        PixelFormat::Rgb,
        DType::U8,
        Some(TensorMemory::Dma),
    )
    .unwrap();
    let mut g2d = G2DProcessor::new().unwrap();
    let (g2d_result, src, g2d_dst) = convert_img(
        &mut g2d,
        src,
        g2d_dst,
        Rotation::None,
        Flip::None,
        Crop::no_crop(),
    );
    g2d_result.unwrap();
    // CPU reference pass over the returned source tensor.
    let cpu_dst = TensorDyn::image(1280, 720, PixelFormat::Rgb, DType::U8, None).unwrap();
    let mut cpu = CPUProcessor::new();
    let (cpu_result, _src, cpu_dst) = convert_img(
        &mut cpu,
        src,
        cpu_dst,
        Rotation::None,
        Flip::None,
        Crop::no_crop(),
    );
    cpu_result.unwrap();
    compare_images(&g2d_dst, &cpu_dst, 0.98, function!());
}
#[test]
#[cfg(target_os = "linux")]
/// YUYV -> YUYV downscale (1280x720 -> 600x400) on G2D, validated against the
/// CPU backend; both outputs are normalized to RGB before comparison because
/// the comparator only scores RGB-like formats.
fn test_yuyv_to_yuyv_resize_g2d() {
    // Skip messages use function!() for consistency with the OpenGL tests.
    if !is_g2d_available() {
        eprintln!(
            "SKIPPED: {} - G2D library (libg2d.so.2) not available",
            function!()
        );
        return;
    }
    if !is_dma_available() {
        eprintln!(
            "SKIPPED: {} - DMA memory allocation not available (permission denied or no DMA-BUF support)",
            function!()
        );
        return;
    }
    let src = load_bytes_to_tensor(
        1280,
        720,
        PixelFormat::Yuyv,
        None,
        include_bytes!(concat!(
            env!("CARGO_MANIFEST_DIR"),
            "/../../testdata/camera720p.yuyv"
        )),
    )
    .unwrap();
    let g2d_dst = TensorDyn::image(
        600,
        400,
        PixelFormat::Yuyv,
        DType::U8,
        Some(TensorMemory::Dma),
    )
    .unwrap();
    let mut g2d_converter = G2DProcessor::new().unwrap();
    let (result, src, g2d_dst) = convert_img(
        &mut g2d_converter,
        src,
        g2d_dst,
        Rotation::None,
        Flip::None,
        Crop::no_crop(),
    );
    result.unwrap();
    // Reference downscale on the CPU backend.
    let cpu_dst = TensorDyn::image(600, 400, PixelFormat::Yuyv, DType::U8, None).unwrap();
    let mut cpu_converter: CPUProcessor = CPUProcessor::new();
    let (result, _src, cpu_dst) = convert_img(
        &mut cpu_converter,
        src,
        cpu_dst,
        Rotation::None,
        Flip::None,
        Crop::no_crop(),
    );
    result.unwrap();
    compare_images_convert_to_rgb(&g2d_dst, &cpu_dst, 0.98, function!());
}
#[test]
// Downscale a 720p YUYV frame to 960x540 RGBA on the CPU backend and compare
// against the same downscale applied to the pre-converted RGBA reference.
fn test_yuyv_to_rgba_resize_cpu() {
    let (dst_width, dst_height) = (960, 540);
    let yuyv_bytes = include_bytes!(concat!(
        env!("CARGO_MANIFEST_DIR"),
        "/../../testdata/camera720p.yuyv"
    ));
    let src = load_bytes_to_tensor(1280, 720, PixelFormat::Yuyv, None, yuyv_bytes).unwrap();
    let dst =
        TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
    let mut converter = CPUProcessor::new();
    let (status, _src, dst) = convert_img(
        &mut converter,
        src,
        dst,
        Rotation::None,
        Flip::None,
        Crop::no_crop(),
    );
    status.unwrap();
    // Build the expected output by downscaling the RGBA reference frame with
    // the same converter.
    let rgba_bytes = include_bytes!(concat!(
        env!("CARGO_MANIFEST_DIR"),
        "/../../testdata/camera720p.rgba"
    ));
    let reference_src =
        load_bytes_to_tensor(1280, 720, PixelFormat::Rgba, None, rgba_bytes).unwrap();
    let reference_dst =
        TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
    let (status, _reference_src, reference_dst) = convert_img(
        &mut converter,
        reference_src,
        reference_dst,
        Rotation::None,
        Flip::None,
        Crop::no_crop(),
    );
    status.unwrap();
    compare_images(&dst, &reference_dst, 0.98, function!());
}
#[test]
#[cfg(target_os = "linux")]
/// YUYV -> RGBA with a 400x300 source crop at (20, 15) and a horizontal flip
/// on the G2D backend, validated against the CPU backend applying the exact
/// same operation (0.98 similarity threshold).
fn test_yuyv_to_rgba_crop_flip_g2d() {
    // Skip messages use function!() for consistency with the OpenGL tests.
    if !is_g2d_available() {
        eprintln!(
            "SKIPPED: {} - G2D library (libg2d.so.2) not available",
            function!()
        );
        return;
    }
    if !is_dma_available() {
        eprintln!(
            "SKIPPED: {} - DMA memory allocation not available (permission denied or no DMA-BUF support)",
            function!()
        );
        return;
    }
    let src = load_bytes_to_tensor(
        1280,
        720,
        PixelFormat::Yuyv,
        Some(TensorMemory::Dma),
        include_bytes!(concat!(
            env!("CARGO_MANIFEST_DIR"),
            "/../../testdata/camera720p.yuyv"
        )),
    )
    .unwrap();
    let (dst_width, dst_height) = (640, 640);
    let dst_g2d = TensorDyn::image(
        dst_width,
        dst_height,
        PixelFormat::Rgba,
        DType::U8,
        Some(TensorMemory::Dma),
    )
    .unwrap();
    let mut g2d_converter = G2DProcessor::new().unwrap();
    // Crop a 400x300 window at (20, 15) out of the source frame.
    let crop = Crop {
        src_rect: Some(Rect {
            left: 20,
            top: 15,
            width: 400,
            height: 300,
        }),
        dst_rect: None,
        dst_color: None,
    };
    let (result, src, dst_g2d) = convert_img(
        &mut g2d_converter,
        src,
        dst_g2d,
        Rotation::None,
        Flip::Horizontal,
        crop,
    );
    result.unwrap();
    // Reference: identical crop + flip on the CPU backend, reusing the
    // source tensor returned by the first conversion.
    let dst_cpu = TensorDyn::image(
        dst_width,
        dst_height,
        PixelFormat::Rgba,
        DType::U8,
        Some(TensorMemory::Dma),
    )
    .unwrap();
    let mut cpu_converter = CPUProcessor::new();
    let (result, _src, dst_cpu) = convert_img(
        &mut cpu_converter,
        src,
        dst_cpu,
        Rotation::None,
        Flip::Horizontal,
        crop,
    );
    result.unwrap();
    compare_images(&dst_g2d, &dst_cpu, 0.98, function!());
}
#[test]
#[cfg(target_os = "linux")]
#[cfg(feature = "opengl")]
// YUYV -> RGBA with a 400x300 source crop at (20, 15) and a horizontal flip
// on the OpenGL backend, validated against the CPU backend applying the
// exact same operation (0.98 similarity threshold).
fn test_yuyv_to_rgba_crop_flip_opengl() {
    // Hardware-dependent: skip when OpenGL or DMA-BUF allocation is missing.
    if !is_opengl_available() {
        eprintln!("SKIPPED: {} - OpenGL not available", function!());
        return;
    }
    if !is_dma_available() {
        eprintln!(
            "SKIPPED: {} - DMA memory allocation not available (permission denied or no DMA-BUF support)",
            function!()
        );
        return;
    }
    let src = load_bytes_to_tensor(
        1280,
        720,
        PixelFormat::Yuyv,
        Some(TensorMemory::Dma),
        include_bytes!(concat!(
            env!("CARGO_MANIFEST_DIR"),
            "/../../testdata/camera720p.yuyv"
        )),
    )
    .unwrap();
    let (dst_width, dst_height) = (640, 640);
    let dst_gl = TensorDyn::image(
        dst_width,
        dst_height,
        PixelFormat::Rgba,
        DType::U8,
        Some(TensorMemory::Dma),
    )
    .unwrap();
    let mut gl_converter = GLProcessorThreaded::new(None).unwrap();
    // Crop a 400x300 window at (20, 15) from the source before scaling.
    let crop = Crop {
        src_rect: Some(Rect {
            left: 20,
            top: 15,
            width: 400,
            height: 300,
        }),
        dst_rect: None,
        dst_color: None,
    };
    let (result, src, dst_gl) = convert_img(
        &mut gl_converter,
        src,
        dst_gl,
        Rotation::None,
        Flip::Horizontal,
        crop,
    );
    result.unwrap();
    // Reference: identical crop + flip on the CPU backend with the same
    // source tensor returned by the first conversion.
    let dst_cpu = TensorDyn::image(
        dst_width,
        dst_height,
        PixelFormat::Rgba,
        DType::U8,
        Some(TensorMemory::Dma),
    )
    .unwrap();
    let mut cpu_converter = CPUProcessor::new();
    let (result, _src, dst_cpu) = convert_img(
        &mut cpu_converter,
        src,
        dst_cpu,
        Rotation::None,
        Flip::Horizontal,
        crop,
    );
    result.unwrap();
    compare_images(&dst_gl, &dst_cpu, 0.98, function!());
}
#[test]
/// VYUY -> RGBA on the CPU backend, compared against the pre-rendered RGBA
/// reference frame (0.98 similarity threshold).
fn test_vyuy_to_rgba_cpu() {
    // Load the raw VYUY frame through the shared helper instead of an inline
    // map/copy, consistent with the other conversion tests; this also avoids
    // the intermediate Vec the old .to_vec() allocated.
    let src = load_bytes_to_tensor(
        1280,
        720,
        PixelFormat::Vyuy,
        None,
        include_bytes!(concat!(
            env!("CARGO_MANIFEST_DIR"),
            "/../../testdata/camera720p.vyuy"
        )),
    )
    .unwrap();
    let dst = TensorDyn::image(1280, 720, PixelFormat::Rgba, DType::U8, None).unwrap();
    let mut cpu_converter = CPUProcessor::new();
    let (result, _src, dst) = convert_img(
        &mut cpu_converter,
        src,
        dst,
        Rotation::None,
        Flip::None,
        Crop::no_crop(),
    );
    result.unwrap();
    // Reference image: pre-converted RGBA bytes for the same frame.
    let target_image = TensorDyn::image(1280, 720, PixelFormat::Rgba, DType::U8, None).unwrap();
    target_image
        .as_u8()
        .unwrap()
        .map()
        .unwrap()
        .as_mut_slice()
        .copy_from_slice(include_bytes!(concat!(
            env!("CARGO_MANIFEST_DIR"),
            "/../../testdata/camera720p.rgba"
        )));
    compare_images(&dst, &target_image, 0.98, function!());
}
#[test]
/// VYUY -> RGB on the CPU backend, compared against the RGBA reference file
/// with the alpha channel stripped (0.98 similarity threshold).
fn test_vyuy_to_rgb_cpu() {
    // Load the raw VYUY frame through the shared helper instead of an inline
    // map/copy, consistent with the other conversion tests; this also avoids
    // the intermediate Vec the old .to_vec() allocated.
    let src = load_bytes_to_tensor(
        1280,
        720,
        PixelFormat::Vyuy,
        None,
        include_bytes!(concat!(
            env!("CARGO_MANIFEST_DIR"),
            "/../../testdata/camera720p.vyuy"
        )),
    )
    .unwrap();
    let dst = TensorDyn::image(1280, 720, PixelFormat::Rgb, DType::U8, None).unwrap();
    let mut cpu_converter = CPUProcessor::new();
    let (result, _src, dst) = convert_img(
        &mut cpu_converter,
        src,
        dst,
        Rotation::None,
        Flip::None,
        Crop::no_crop(),
    );
    result.unwrap();
    // Build the RGB reference by dropping the alpha byte from each RGBA
    // pixel of the reference file.
    let target_image = TensorDyn::image(1280, 720, PixelFormat::Rgb, DType::U8, None).unwrap();
    target_image
        .as_u8()
        .unwrap()
        .map()
        .unwrap()
        .as_mut_slice()
        .as_chunks_mut::<3>()
        .0
        .iter_mut()
        .zip(
            include_bytes!(concat!(
                env!("CARGO_MANIFEST_DIR"),
                "/../../testdata/camera720p.rgba"
            ))
            .as_chunks::<4>()
            .0,
        )
        .for_each(|(dst, src)| *dst = [src[0], src[1], src[2]]);
    compare_images(&dst, &target_image, 0.98, function!());
}
#[test]
#[cfg(target_os = "linux")]
#[ignore = "G2D does not support VYUY; re-enable when hardware support is added"]
/// VYUY -> RGBA on G2D against the RGBA reference file. Currently ignored
/// because G2D lacks VYUY support; a G2D error is treated as a skip.
fn test_vyuy_to_rgba_g2d() {
    // Skip messages use function!() for consistency with the OpenGL tests.
    if !is_g2d_available() {
        eprintln!(
            "SKIPPED: {} - G2D library (libg2d.so.2) not available",
            function!()
        );
        return;
    }
    if !is_dma_available() {
        eprintln!(
            "SKIPPED: {} - DMA memory allocation not available (permission denied or no DMA-BUF support)",
            function!()
        );
        return;
    }
    let src = load_bytes_to_tensor(
        1280,
        720,
        PixelFormat::Vyuy,
        None,
        include_bytes!(concat!(
            env!("CARGO_MANIFEST_DIR"),
            "/../../testdata/camera720p.vyuy"
        )),
    )
    .unwrap();
    let dst = TensorDyn::image(
        1280,
        720,
        PixelFormat::Rgba,
        DType::U8,
        Some(TensorMemory::Dma),
    )
    .unwrap();
    let mut g2d_converter = G2DProcessor::new().unwrap();
    let (result, _src, dst) = convert_img(
        &mut g2d_converter,
        src,
        dst,
        Rotation::None,
        Flip::None,
        Crop::no_crop(),
    );
    // A G2D-level failure means the format is unsupported on this hardware:
    // treat it as a skip rather than a test failure.
    match result {
        Err(Error::G2D(_)) => {
            eprintln!(
                "SKIPPED: {} - G2D does not support PixelFormat::Vyuy format",
                function!()
            );
            return;
        }
        r => r.unwrap(),
    }
    let target_image = TensorDyn::image(1280, 720, PixelFormat::Rgba, DType::U8, None).unwrap();
    target_image
        .as_u8()
        .unwrap()
        .map()
        .unwrap()
        .as_mut_slice()
        .copy_from_slice(include_bytes!(concat!(
            env!("CARGO_MANIFEST_DIR"),
            "/../../testdata/camera720p.rgba"
        )));
    compare_images(&dst, &target_image, 0.98, function!());
}
#[test]
#[cfg(target_os = "linux")]
#[ignore = "G2D does not support VYUY; re-enable when hardware support is added"]
/// VYUY -> RGB on G2D, validated against the CPU backend. Currently ignored
/// because G2D lacks VYUY support; a G2D error is treated as a skip.
fn test_vyuy_to_rgb_g2d() {
    // Skip messages use function!() for consistency with the OpenGL tests.
    if !is_g2d_available() {
        eprintln!(
            "SKIPPED: {} - G2D library (libg2d.so.2) not available",
            function!()
        );
        return;
    }
    if !is_dma_available() {
        eprintln!(
            "SKIPPED: {} - DMA memory allocation not available (permission denied or no DMA-BUF support)",
            function!()
        );
        return;
    }
    let src = load_bytes_to_tensor(
        1280,
        720,
        PixelFormat::Vyuy,
        None,
        include_bytes!(concat!(
            env!("CARGO_MANIFEST_DIR"),
            "/../../testdata/camera720p.vyuy"
        )),
    )
    .unwrap();
    let g2d_dst = TensorDyn::image(
        1280,
        720,
        PixelFormat::Rgb,
        DType::U8,
        Some(TensorMemory::Dma),
    )
    .unwrap();
    let mut g2d_converter = G2DProcessor::new().unwrap();
    let (result, src, g2d_dst) = convert_img(
        &mut g2d_converter,
        src,
        g2d_dst,
        Rotation::None,
        Flip::None,
        Crop::no_crop(),
    );
    // A G2D-level failure means the format is unsupported on this hardware:
    // treat it as a skip rather than a test failure.
    match result {
        Err(Error::G2D(_)) => {
            eprintln!(
                "SKIPPED: {} - G2D does not support PixelFormat::Vyuy format",
                function!()
            );
            return;
        }
        r => r.unwrap(),
    }
    // Reference conversion of the same source on the CPU backend.
    let cpu_dst = TensorDyn::image(1280, 720, PixelFormat::Rgb, DType::U8, None).unwrap();
    let mut cpu_converter: CPUProcessor = CPUProcessor::new();
    let (result, _src, cpu_dst) = convert_img(
        &mut cpu_converter,
        src,
        cpu_dst,
        Rotation::None,
        Flip::None,
        Crop::no_crop(),
    );
    result.unwrap();
    compare_images(&g2d_dst, &cpu_dst, 0.98, function!());
}
#[test]
#[cfg(target_os = "linux")]
#[cfg(feature = "opengl")]
// VYUY -> RGBA on the OpenGL backend (DMA tensors on both sides), compared
// against the pre-rendered RGBA reference; a NotSupported error is treated
// as a skip because VYUY DMA import support is driver-dependent.
fn test_vyuy_to_rgba_opengl() {
    if !is_opengl_available() {
        eprintln!("SKIPPED: {} - OpenGL not available", function!());
        return;
    }
    if !is_dma_available() {
        eprintln!(
            "SKIPPED: {} - DMA memory allocation not available (permission denied or no DMA-BUF support)",
            function!()
        );
        return;
    }
    let src = load_bytes_to_tensor(
        1280,
        720,
        PixelFormat::Vyuy,
        Some(TensorMemory::Dma),
        include_bytes!(concat!(
            env!("CARGO_MANIFEST_DIR"),
            "/../../testdata/camera720p.vyuy"
        )),
    )
    .unwrap();
    let dst = TensorDyn::image(
        1280,
        720,
        PixelFormat::Rgba,
        DType::U8,
        Some(TensorMemory::Dma),
    )
    .unwrap();
    let mut gl_converter = GLProcessorThreaded::new(None).unwrap();
    let (result, _src, dst) = convert_img(
        &mut gl_converter,
        src,
        dst,
        Rotation::None,
        Flip::None,
        Crop::no_crop(),
    );
    // Skip instead of failing when this GL stack cannot import VYUY.
    match result {
        Err(Error::NotSupported(_)) => {
            eprintln!(
                "SKIPPED: {} - OpenGL does not support PixelFormat::Vyuy DMA format",
                function!()
            );
            return;
        }
        r => r.unwrap(),
    }
    // Reference image: pre-converted RGBA bytes for the same frame.
    let target_image = TensorDyn::image(1280, 720, PixelFormat::Rgba, DType::U8, None).unwrap();
    target_image
        .as_u8()
        .unwrap()
        .map()
        .unwrap()
        .as_mut_slice()
        .copy_from_slice(include_bytes!(concat!(
            env!("CARGO_MANIFEST_DIR"),
            "/../../testdata/camera720p.rgba"
        )));
    compare_images(&dst, &target_image, 0.98, function!());
}
#[test]
// NV12 -> RGBA on the CPU backend against the decoded JPEG reference.
fn test_nv12_to_rgba_cpu() {
    let file = include_bytes!(concat!(
        env!("CARGO_MANIFEST_DIR"),
        "/../../testdata/zidane.nv12"
    ))
    .to_vec();
    let src = TensorDyn::image(1280, 720, PixelFormat::Nv12, DType::U8, None).unwrap();
    // NV12 payload is width * height * 3 / 2 bytes (full-res Y plane plus a
    // half-height interleaved UV plane); copy into the leading sub-slice.
    // NOTE(review): the explicit upper bound implies the mapped buffer may
    // be larger than the payload — confirm whether rows are padded.
    src.as_u8().unwrap().map().unwrap().as_mut_slice()[0..(1280 * 720 * 3 / 2)]
        .copy_from_slice(&file);
    let dst = TensorDyn::image(1280, 720, PixelFormat::Rgba, DType::U8, None).unwrap();
    let mut cpu_converter = CPUProcessor::new();
    let (result, _src, dst) = convert_img(
        &mut cpu_converter,
        src,
        dst,
        Rotation::None,
        Flip::None,
        Crop::no_crop(),
    );
    result.unwrap();
    // Reference: the same scene decoded from JPEG straight to RGBA.
    let target_image = crate::load_image(
        include_bytes!(concat!(
            env!("CARGO_MANIFEST_DIR"),
            "/../../testdata/zidane.jpg"
        )),
        Some(PixelFormat::Rgba),
        None,
    )
    .unwrap();
    compare_images(&dst, &target_image, 0.98, function!());
}
#[test]
// NV12 -> RGB on the CPU backend against the decoded JPEG reference.
fn test_nv12_to_rgb_cpu() {
    let file = include_bytes!(concat!(
        env!("CARGO_MANIFEST_DIR"),
        "/../../testdata/zidane.nv12"
    ))
    .to_vec();
    let src = TensorDyn::image(1280, 720, PixelFormat::Nv12, DType::U8, None).unwrap();
    // Copy the 3/2-sized NV12 payload into the leading sub-slice (see note
    // in test_nv12_to_rgba_cpu).
    src.as_u8().unwrap().map().unwrap().as_mut_slice()[0..(1280 * 720 * 3 / 2)]
        .copy_from_slice(&file);
    let dst = TensorDyn::image(1280, 720, PixelFormat::Rgb, DType::U8, None).unwrap();
    let mut cpu_converter = CPUProcessor::new();
    let (result, _src, dst) = convert_img(
        &mut cpu_converter,
        src,
        dst,
        Rotation::None,
        Flip::None,
        Crop::no_crop(),
    );
    result.unwrap();
    let target_image = crate::load_image(
        include_bytes!(concat!(
            env!("CARGO_MANIFEST_DIR"),
            "/../../testdata/zidane.jpg"
        )),
        Some(PixelFormat::Rgb),
        None,
    )
    .unwrap();
    compare_images(&dst, &target_image, 0.98, function!());
}
#[test]
// NV12 -> Grey on the CPU backend against the decoded JPEG reference.
fn test_nv12_to_grey_cpu() {
    let file = include_bytes!(concat!(
        env!("CARGO_MANIFEST_DIR"),
        "/../../testdata/zidane.nv12"
    ))
    .to_vec();
    let src = TensorDyn::image(1280, 720, PixelFormat::Nv12, DType::U8, None).unwrap();
    src.as_u8().unwrap().map().unwrap().as_mut_slice()[0..(1280 * 720 * 3 / 2)]
        .copy_from_slice(&file);
    let dst = TensorDyn::image(1280, 720, PixelFormat::Grey, DType::U8, None).unwrap();
    let mut cpu_converter = CPUProcessor::new();
    let (result, _src, dst) = convert_img(
        &mut cpu_converter,
        src,
        dst,
        Rotation::None,
        Flip::None,
        Crop::no_crop(),
    );
    result.unwrap();
    let target_image = crate::load_image(
        include_bytes!(concat!(
            env!("CARGO_MANIFEST_DIR"),
            "/../../testdata/zidane.jpg"
        )),
        Some(PixelFormat::Grey),
        None,
    )
    .unwrap();
    compare_images(&dst, &target_image, 0.98, function!());
}
#[test]
// NV12 -> YUYV on the CPU backend; the YUYV output is normalized to RGB and
// compared against the JPEG decoded straight to RGB.
fn test_nv12_to_yuyv_cpu() {
    let file = include_bytes!(concat!(
        env!("CARGO_MANIFEST_DIR"),
        "/../../testdata/zidane.nv12"
    ))
    .to_vec();
    let src = TensorDyn::image(1280, 720, PixelFormat::Nv12, DType::U8, None).unwrap();
    src.as_u8().unwrap().map().unwrap().as_mut_slice()[0..(1280 * 720 * 3 / 2)]
        .copy_from_slice(&file);
    let dst = TensorDyn::image(1280, 720, PixelFormat::Yuyv, DType::U8, None).unwrap();
    let mut cpu_converter = CPUProcessor::new();
    let (result, _src, dst) = convert_img(
        &mut cpu_converter,
        src,
        dst,
        Rotation::None,
        Flip::None,
        Crop::no_crop(),
    );
    result.unwrap();
    let target_image = crate::load_image(
        include_bytes!(concat!(
            env!("CARGO_MANIFEST_DIR"),
            "/../../testdata/zidane.jpg"
        )),
        Some(PixelFormat::Rgb),
        None,
    )
    .unwrap();
    compare_images_convert_to_rgb(&dst, &target_image, 0.98, function!());
}
#[test]
// Letterbox a 4x4 RGBA test pattern into a 5x5 PlanarRgb destination: the
// source is placed 1:1 at dst offset (1, 1) and the uncovered border is
// filled with 114. The expected buffer is plane-major: 25 R values, then
// 25 G values, then 25 B values (alpha is dropped — only 3 planes appear).
fn test_cpu_resize_planar_rgb() {
    let src = TensorDyn::image(4, 4, PixelFormat::Rgba, DType::U8, None).unwrap();
    #[rustfmt::skip]
    let src_image = [
        255, 0, 0, 255, 0, 255, 0, 255, 0, 0, 255, 255, 255, 255, 0, 255,
        255, 0, 0, 0, 0, 0, 0, 255, 255, 0, 255, 0, 255, 0, 255, 255,
        0, 0, 255, 0, 0, 255, 255, 255, 255, 255, 0, 0, 0, 0, 0, 255,
        255, 0, 0, 0, 0, 0, 0, 255, 255, 0, 255, 0, 255, 0, 255, 255,
    ];
    src.as_u8()
        .unwrap()
        .map()
        .unwrap()
        .as_mut_slice()
        .copy_from_slice(&src_image);
    let cpu_dst = TensorDyn::image(5, 5, PixelFormat::PlanarRgb, DType::U8, None).unwrap();
    let mut cpu_converter = CPUProcessor::new();
    let (result, _src, cpu_dst) = convert_img(
        &mut cpu_converter,
        src,
        cpu_dst,
        Rotation::None,
        Flip::None,
        // dst_rect places the source at (1, 1); dst_color paints the border.
        Crop::new()
            .with_dst_rect(Some(Rect {
                left: 1,
                top: 1,
                width: 4,
                height: 4,
            }))
            .with_dst_color(Some([114, 114, 114, 255])),
    );
    result.unwrap();
    #[rustfmt::skip]
    let expected_dst = [
        114, 114, 114, 114, 114, 114, 255, 0, 0, 255, 114, 255, 0, 255, 255, 114, 0, 0, 255, 0, 114, 255, 0, 255, 255,
        114, 114, 114, 114, 114, 114, 0, 255, 0, 255, 114, 0, 0, 0, 0, 114, 0, 255, 255, 0, 114, 0, 0, 0, 0,
        114, 114, 114, 114, 114, 114, 0, 0, 255, 0, 114, 0, 0, 255, 255, 114, 255, 255, 0, 0, 114, 0, 0, 255, 255,
    ];
    assert_eq!(
        cpu_dst.as_u8().unwrap().map().unwrap().as_slice(),
        &expected_dst
    );
}
#[test]
// Same letterbox as above but into PlanarRgba: a fourth (alpha) plane is
// expected, filled with 255 for both content and border.
fn test_cpu_resize_planar_rgba() {
    let src = TensorDyn::image(4, 4, PixelFormat::Rgba, DType::U8, None).unwrap();
    #[rustfmt::skip]
    let src_image = [
        255, 0, 0, 255, 0, 255, 0, 255, 0, 0, 255, 255, 255, 255, 0, 255,
        255, 0, 0, 0, 0, 0, 0, 255, 255, 0, 255, 0, 255, 0, 255, 255,
        0, 0, 255, 0, 0, 255, 255, 255, 255, 255, 0, 0, 0, 0, 0, 255,
        255, 0, 0, 0, 0, 0, 0, 255, 255, 0, 255, 0, 255, 0, 255, 255,
    ];
    src.as_u8()
        .unwrap()
        .map()
        .unwrap()
        .as_mut_slice()
        .copy_from_slice(&src_image);
    let cpu_dst = TensorDyn::image(5, 5, PixelFormat::PlanarRgba, DType::U8, None).unwrap();
    let mut cpu_converter = CPUProcessor::new();
    let (result, _src, cpu_dst) = convert_img(
        &mut cpu_converter,
        src,
        cpu_dst,
        Rotation::None,
        Flip::None,
        Crop::new()
            .with_dst_rect(Some(Rect {
                left: 1,
                top: 1,
                width: 4,
                height: 4,
            }))
            .with_dst_color(Some([114, 114, 114, 255])),
    );
    result.unwrap();
    #[rustfmt::skip]
    let expected_dst = [
        114, 114, 114, 114, 114, 114, 255, 0, 0, 255, 114, 255, 0, 255, 255, 114, 0, 0, 255, 0, 114, 255, 0, 255, 255,
        114, 114, 114, 114, 114, 114, 0, 255, 0, 255, 114, 0, 0, 0, 0, 114, 0, 255, 255, 0, 114, 0, 0, 0, 0,
        114, 114, 114, 114, 114, 114, 0, 0, 255, 0, 114, 0, 0, 255, 255, 114, 255, 255, 0, 0, 114, 0, 0, 255, 255,
        255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 0, 255, 0, 255, 255, 0, 255, 0, 255, 255, 0, 255, 0, 255,
    ];
    assert_eq!(
        cpu_dst.as_u8().unwrap().map().unwrap().as_slice(),
        &expected_dst
    );
}
#[test]
#[cfg(target_os = "linux")]
#[cfg(feature = "opengl")]
// Letterboxed resize (440x440 content at offset (102, 102), grey-114 fill)
// into a 640x640 PlanarRgb destination: the OpenGL result must match the
// CPU result within 0.98 similarity.
fn test_opengl_resize_planar_rgb() {
    if !is_opengl_available() {
        eprintln!("SKIPPED: {} - OpenGL not available", function!());
        return;
    }
    if !is_dma_available() {
        eprintln!(
            "SKIPPED: {} - DMA memory allocation not available (permission denied or no DMA-BUF support)",
            function!()
        );
        return;
    }
    let dst_width = 640;
    let dst_height = 640;
    let file = include_bytes!(concat!(
        env!("CARGO_MANIFEST_DIR"),
        "/../../testdata/test_image.jpg"
    ))
    .to_vec();
    let src = crate::load_image(&file, Some(PixelFormat::Rgba), None).unwrap();
    let cpu_dst = TensorDyn::image(
        dst_width,
        dst_height,
        PixelFormat::PlanarRgb,
        DType::U8,
        None,
    )
    .unwrap();
    let mut cpu_converter = CPUProcessor::new();
    // First pass: full-frame CPU conversion into cpu_dst.
    // NOTE(review): the letterbox pass below writes the destination again,
    // so this pass looks redundant — confirm whether it is intentional
    // (e.g. exercising two CPU convert paths back to back).
    let (result, src, cpu_dst) = convert_img(
        &mut cpu_converter,
        src,
        cpu_dst,
        Rotation::None,
        Flip::None,
        Crop::no_crop(),
    );
    result.unwrap();
    let crop_letterbox = Crop::new()
        .with_dst_rect(Some(Rect {
            left: 102,
            top: 102,
            width: 440,
            height: 440,
        }))
        .with_dst_color(Some([114, 114, 114, 114]));
    let (result, src, cpu_dst) = convert_img(
        &mut cpu_converter,
        src,
        cpu_dst,
        Rotation::None,
        Flip::None,
        crop_letterbox,
    );
    result.unwrap();
    // Same letterbox on the OpenGL backend for comparison.
    let gl_dst = TensorDyn::image(
        dst_width,
        dst_height,
        PixelFormat::PlanarRgb,
        DType::U8,
        None,
    )
    .unwrap();
    let mut gl_converter = GLProcessorThreaded::new(None).unwrap();
    let (result, _src, gl_dst) = convert_img(
        &mut gl_converter,
        src,
        gl_dst,
        Rotation::None,
        Flip::None,
        crop_letterbox,
    );
    result.unwrap();
    compare_images(&gl_dst, &cpu_dst, 0.98, function!());
}
#[test]
// Letterboxed resize of a decoded JPEG into NV16 vs packed RGB on the CPU
// backend; both outputs are normalized to RGB for comparison. The stricter
// 0.99 threshold is used since both paths run on the same backend.
fn test_cpu_resize_nv16() {
    let file = include_bytes!(concat!(
        env!("CARGO_MANIFEST_DIR"),
        "/../../testdata/zidane.jpg"
    ))
    .to_vec();
    let src = crate::load_image(&file, Some(PixelFormat::Rgba), None).unwrap();
    let cpu_nv16_dst = TensorDyn::image(640, 640, PixelFormat::Nv16, DType::U8, None).unwrap();
    let cpu_rgb_dst = TensorDyn::image(640, 640, PixelFormat::Rgb, DType::U8, None).unwrap();
    let mut cpu_converter = CPUProcessor::new();
    // 600x360 content placed at (20, 140) with an orange border fill.
    let crop = Crop::new()
        .with_dst_rect(Some(Rect {
            left: 20,
            top: 140,
            width: 600,
            height: 360,
        }))
        .with_dst_color(Some([255, 128, 0, 255]));
    let (result, src, cpu_nv16_dst) = convert_img(
        &mut cpu_converter,
        src,
        cpu_nv16_dst,
        Rotation::None,
        Flip::None,
        crop,
    );
    result.unwrap();
    let (result, _src, cpu_rgb_dst) = convert_img(
        &mut cpu_converter,
        src,
        cpu_rgb_dst,
        Rotation::None,
        Flip::None,
        crop,
    );
    result.unwrap();
    compare_images_convert_to_rgb(&cpu_nv16_dst, &cpu_rgb_dst, 0.99, function!());
}
/// Allocates a `width` x `height` image tensor with the given pixel `format`,
/// `DType::U8` elements and the requested backing `memory`, then fills it
/// with `bytes`.
///
/// Propagates allocation and mapping failures as `Err`; panics if the tensor
/// is not U8-viewable or `bytes` does not match the mapped buffer length
/// (`copy_from_slice` requires equal lengths).
fn load_bytes_to_tensor(
    width: usize,
    height: usize,
    format: PixelFormat,
    memory: Option<TensorMemory>,
    bytes: &[u8],
) -> Result<TensorDyn, Error> {
    let tensor = TensorDyn::image(width, height, format, DType::U8, memory)?;
    {
        // Scope the mapping so it is released before the tensor is returned.
        let view = tensor.as_u8().unwrap();
        let mut mapped = view.map()?;
        mapped.as_mut_slice().copy_from_slice(bytes);
    }
    Ok(tensor)
}
/// Asserts that two images share geometry and pixel format, normalizes both
/// to RGB, and scores their structural similarity (RMS). On a score below
/// `threshold`, saves the per-pixel difference color map to `{name}.png` and
/// panics.
///
/// Supported input formats: Rgb, Rgba, Grey, and PlanarRgb.
fn compare_images(img1: &TensorDyn, img2: &TensorDyn, threshold: f64, name: &str) {
    assert_eq!(img1.height(), img2.height(), "Heights differ");
    assert_eq!(img1.width(), img2.width(), "Widths differ");
    assert_eq!(
        img1.format().unwrap(),
        img2.format().unwrap(),
        "PixelFormat differ"
    );
    assert!(
        matches!(
            img1.format().unwrap(),
            PixelFormat::Rgb | PixelFormat::Rgba | PixelFormat::Grey | PixelFormat::PlanarRgb
        ),
        // Message lists every format the matches! above actually accepts
        // (previously it claimed only Rgb/Rgba were allowed).
        "format must be Rgb, Rgba, Grey, or PlanarRgb for comparison"
    );
    // Normalize img1 to RGB for the comparator. Grey is widened to RGB;
    // PlanarRgb is viewed as one tall (height * 3) grayscale image so each
    // channel plane is compared in place — NOTE(review): this compares
    // plane layout rather than composited color, which is adequate for a
    // similarity check but is not a true color comparison.
    let image1 = match img1.format().unwrap() {
        PixelFormat::Rgb => image::RgbImage::from_vec(
            img1.width().unwrap() as u32,
            img1.height().unwrap() as u32,
            img1.as_u8().unwrap().map().unwrap().to_vec(),
        )
        .unwrap(),
        PixelFormat::Rgba => image::RgbaImage::from_vec(
            img1.width().unwrap() as u32,
            img1.height().unwrap() as u32,
            img1.as_u8().unwrap().map().unwrap().to_vec(),
        )
        .unwrap()
        .convert(),
        PixelFormat::Grey => image::GrayImage::from_vec(
            img1.width().unwrap() as u32,
            img1.height().unwrap() as u32,
            img1.as_u8().unwrap().map().unwrap().to_vec(),
        )
        .unwrap()
        .convert(),
        PixelFormat::PlanarRgb => image::GrayImage::from_vec(
            img1.width().unwrap() as u32,
            (img1.height().unwrap() * 3) as u32,
            img1.as_u8().unwrap().map().unwrap().to_vec(),
        )
        .unwrap()
        .convert(),
        // Unreachable: the assert above restricts the format set. Kept as a
        // silent return rather than a panic to stay defensive.
        _ => return,
    };
    let image2 = match img2.format().unwrap() {
        PixelFormat::Rgb => image::RgbImage::from_vec(
            img2.width().unwrap() as u32,
            img2.height().unwrap() as u32,
            img2.as_u8().unwrap().map().unwrap().to_vec(),
        )
        .unwrap(),
        PixelFormat::Rgba => image::RgbaImage::from_vec(
            img2.width().unwrap() as u32,
            img2.height().unwrap() as u32,
            img2.as_u8().unwrap().map().unwrap().to_vec(),
        )
        .unwrap()
        .convert(),
        PixelFormat::Grey => image::GrayImage::from_vec(
            img2.width().unwrap() as u32,
            img2.height().unwrap() as u32,
            img2.as_u8().unwrap().map().unwrap().to_vec(),
        )
        .unwrap()
        .convert(),
        PixelFormat::PlanarRgb => image::GrayImage::from_vec(
            img2.width().unwrap() as u32,
            (img2.height().unwrap() * 3) as u32,
            img2.as_u8().unwrap().map().unwrap().to_vec(),
        )
        .unwrap()
        .convert(),
        _ => return,
    };
    let similarity = image_compare::rgb_similarity_structure(
        &image_compare::Algorithm::RootMeanSquared,
        &image1,
        &image2,
    )
    .expect("Image Comparison failed");
    if similarity.score < threshold {
        // Persist the difference map to help debug the regression.
        similarity
            .image
            .to_color_map()
            .save(format!("{name}.png"))
            .unwrap();
        panic!(
            "{name}: converted image and target image have similarity score too low: {} < {}",
            similarity.score, threshold
        )
    }
}
/// Compares two images of arbitrary (possibly non-RGB) formats by first
/// converting both to packed RGB with the CPU backend. If either conversion
/// fails (format unsupported by the CPU converter), falls back to comparing
/// the raw bytes as grayscale images. Panics when the RMS structural
/// similarity is below `threshold`; on the RGB path the difference color
/// map is saved to `{name}.png` first.
fn compare_images_convert_to_rgb(
    img1: &TensorDyn,
    img2: &TensorDyn,
    threshold: f64,
    name: &str,
) {
    assert_eq!(img1.height(), img2.height(), "Heights differ");
    assert_eq!(img1.width(), img2.width(), "Widths differ");
    let mut img_rgb1 = TensorDyn::image(
        img1.width().unwrap(),
        img1.height().unwrap(),
        PixelFormat::Rgb,
        DType::U8,
        Some(TensorMemory::Mem),
    )
    .unwrap();
    // Size the second staging buffer from img2 (previously it used img1's
    // dimensions; equivalent only because of the asserts above — using the
    // matching source is correct and robust if those asserts are relaxed).
    let mut img_rgb2 = TensorDyn::image(
        img2.width().unwrap(),
        img2.height().unwrap(),
        PixelFormat::Rgb,
        DType::U8,
        Some(TensorMemory::Mem),
    )
    .unwrap();
    let mut cpu_converter = CPUProcessor::default();
    let r1 = cpu_converter.convert(
        img1,
        &mut img_rgb1,
        crate::Rotation::None,
        crate::Flip::None,
        crate::Crop::default(),
    );
    let r2 = cpu_converter.convert(
        img2,
        &mut img_rgb2,
        crate::Rotation::None,
        crate::Flip::None,
        crate::Crop::default(),
    );
    if r1.is_err() || r2.is_err() {
        // Fallback: compare the raw bytes as grayscale. NOTE(review): the
        // height is derived as len / width, which assumes one byte per
        // pixel per row; multi-byte formats are compared as taller images.
        let w = img1.width().unwrap() as u32;
        let data1 = img1.as_u8().unwrap().map().unwrap().to_vec();
        let data2 = img2.as_u8().unwrap().map().unwrap().to_vec();
        let h1 = (data1.len() as u32) / w;
        let h2 = (data2.len() as u32) / w;
        let g1 = image::GrayImage::from_vec(w, h1, data1).unwrap();
        let g2 = image::GrayImage::from_vec(w, h2, data2).unwrap();
        let similarity = image_compare::gray_similarity_structure(
            &image_compare::Algorithm::RootMeanSquared,
            &g1,
            &g2,
        )
        .expect("Image Comparison failed");
        if similarity.score < threshold {
            panic!(
                "{name}: converted image and target image have similarity score too low: {} < {}",
                similarity.score, threshold
            )
        }
        return;
    }
    let image1 = image::RgbImage::from_vec(
        img_rgb1.width().unwrap() as u32,
        img_rgb1.height().unwrap() as u32,
        img_rgb1.as_u8().unwrap().map().unwrap().to_vec(),
    )
    .unwrap();
    let image2 = image::RgbImage::from_vec(
        img_rgb2.width().unwrap() as u32,
        img_rgb2.height().unwrap() as u32,
        img_rgb2.as_u8().unwrap().map().unwrap().to_vec(),
    )
    .unwrap();
    let similarity = image_compare::rgb_similarity_structure(
        &image_compare::Algorithm::RootMeanSquared,
        &image1,
        &image2,
    )
    .expect("Image Comparison failed");
    if similarity.score < threshold {
        // Save the difference map for debugging before panicking.
        similarity
            .image
            .to_color_map()
            .save(format!("{name}.png"))
            .unwrap();
        panic!(
            "{name}: converted image and target image have similarity score too low: {} < {}",
            similarity.score, threshold
        )
    }
}
#[test]
// NV12 allocation: the backing U8 tensor must be one plane of
// height * 3 / 2 rows (Y plane plus the half-height interleaved UV plane).
fn test_nv12_image_creation() {
    let (width, height) = (640, 480);
    let img = TensorDyn::image(width, height, PixelFormat::Nv12, DType::U8, None).unwrap();
    assert_eq!(img.format().unwrap(), PixelFormat::Nv12);
    assert_eq!(img.height(), Some(height));
    assert_eq!(img.width(), Some(width));
    assert_eq!(img.as_u8().unwrap().shape(), &[height * 3 / 2, width]);
}
#[test]
// NV12 is semi-planar, so it reports a single logical channel.
fn test_nv12_channels() {
    let img = TensorDyn::image(640, 480, PixelFormat::Nv12, DType::U8, None).unwrap();
    let channels = img.format().unwrap().channels();
    assert_eq!(channels, 1);
}
#[test]
// set_format on a CHW-shaped tensor: planar formats take width from the last
// axis and height from the middle axis.
fn test_tensor_set_format_planar() {
    let mut tensor = Tensor::<u8>::new(&[3, 480, 640], None, None).unwrap();
    tensor.set_format(PixelFormat::PlanarRgb).unwrap();
    assert_eq!(tensor.format(), Some(PixelFormat::PlanarRgb));
    assert_eq!(tensor.height(), Some(480));
    assert_eq!(tensor.width(), Some(640));
}
#[test]
// set_format on an HWC-shaped tensor: interleaved formats take height from
// the first axis and width from the second.
fn test_tensor_set_format_interleaved() {
    let mut tensor = Tensor::<u8>::new(&[480, 640, 4], None, None).unwrap();
    tensor.set_format(PixelFormat::Rgba).unwrap();
    assert_eq!(tensor.format(), Some(PixelFormat::Rgba));
    assert_eq!(tensor.height(), Some(480));
    assert_eq!(tensor.width(), Some(640));
}
#[test]
// TensorDyn::image round-trips geometry and format for packed RGB.
fn test_tensordyn_image_rgb() {
    let img = TensorDyn::image(640, 480, PixelFormat::Rgb, DType::U8, None).unwrap();
    assert_eq!(img.format(), Some(PixelFormat::Rgb));
    assert_eq!(img.height(), Some(480));
    assert_eq!(img.width(), Some(640));
}
#[test]
// TensorDyn::image round-trips geometry and format for planar RGB.
fn test_tensordyn_image_planar_rgb() {
    let img = TensorDyn::image(640, 480, PixelFormat::PlanarRgb, DType::U8, None).unwrap();
    assert_eq!(img.format(), Some(PixelFormat::PlanarRgb));
    assert_eq!(img.height(), Some(480));
    assert_eq!(img.width(), Some(640));
}
#[test]
fn test_rgb_int8_format() {
let img = TensorDyn::image(
1280,
720,
PixelFormat::Rgb,
DType::I8,
Some(TensorMemory::Mem),
)
.unwrap();
assert_eq!(img.width(), Some(1280));
assert_eq!(img.height(), Some(720));
assert_eq!(img.format(), Some(PixelFormat::Rgb));
assert_eq!(img.dtype(), DType::I8);
}
#[test]
fn test_planar_rgb_int8_format() {
let img = TensorDyn::image(
1280,
720,
PixelFormat::PlanarRgb,
DType::I8,
Some(TensorMemory::Mem),
)
.unwrap();
assert_eq!(img.width(), Some(1280));
assert_eq!(img.height(), Some(720));
assert_eq!(img.format(), Some(PixelFormat::PlanarRgb));
assert_eq!(img.dtype(), DType::I8);
}
#[test]
// Wrapping an HWC u8 tensor tagged as Rgb yields an image with the expected
// geometry and format.
fn test_rgb_from_tensor() {
    let mut tensor = Tensor::<u8>::new(&[720, 1280, 3], None, None).unwrap();
    tensor.set_format(PixelFormat::Rgb).unwrap();
    let img = TensorDyn::from(tensor);
    assert_eq!(img.format(), Some(PixelFormat::Rgb));
    assert_eq!(img.height(), Some(720));
    assert_eq!(img.width(), Some(1280));
}
#[test]
// Wrapping a CHW u8 tensor tagged as PlanarRgb yields an image with the
// expected geometry and format.
fn test_planar_rgb_from_tensor() {
    let mut tensor = Tensor::<u8>::new(&[3, 720, 1280], None, None).unwrap();
    tensor.set_format(PixelFormat::PlanarRgb).unwrap();
    let img = TensorDyn::from(tensor);
    assert_eq!(img.format(), Some(PixelFormat::PlanarRgb));
    assert_eq!(img.height(), Some(720));
    assert_eq!(img.width(), Some(1280));
}
#[test]
// The DType requested at allocation is preserved by TensorDyn::image.
fn test_dtype_determines_int8() {
    for (dtype, expected) in [(DType::U8, DType::U8), (DType::I8, DType::I8)] {
        let img = TensorDyn::image(64, 64, PixelFormat::Rgb, dtype, None).unwrap();
        assert_eq!(img.dtype(), expected);
    }
}
#[test]
// Layout classification: interleaved formats are Packed, PlanarRgb is
// Planar, and NV12 (Y plane + interleaved UV plane) is SemiPlanar.
fn test_pixel_layout_packed_vs_planar() {
    for packed_fmt in [PixelFormat::Rgb, PixelFormat::Rgba] {
        assert_eq!(packed_fmt.layout(), PixelLayout::Packed);
    }
    assert_eq!(PixelFormat::PlanarRgb.layout(), PixelLayout::Planar);
    assert_eq!(PixelFormat::Nv12.layout(), PixelLayout::SemiPlanar);
}
#[cfg(target_os = "linux")]
#[cfg(feature = "opengl")]
#[test]
// End-to-end convert where both source and destination live in OpenGL pixel
// buffer objects (PBOs): upload a CPU-prepared RGBA frame into a PBO,
// resize 640x480 -> 320x240 on the GL path, and compare against the CPU
// resize of the same frame (0.95 similarity).
fn test_convert_pbo_to_pbo() {
    let mut converter = ImageProcessor::new().unwrap();
    // Only meaningful when the GL transfer backend is PBO.
    let is_pbo = converter
        .opengl
        .as_ref()
        .is_some_and(|gl| gl.transfer_backend() == opengl_headless::TransferBackend::Pbo);
    if !is_pbo {
        eprintln!("Skipping test_convert_pbo_to_pbo: backend is not PBO");
        return;
    }
    let src_w = 640;
    let src_h = 480;
    let dst_w = 320;
    let dst_h = 240;
    let pbo_src = converter
        .create_image(src_w, src_h, PixelFormat::Rgba, DType::U8, None)
        .unwrap();
    assert_eq!(
        pbo_src.as_u8().unwrap().memory(),
        TensorMemory::Pbo,
        "create_image should produce a PBO tensor"
    );
    // Prepare the source frame on the CPU: decode the JPEG and resize it to
    // the source dimensions in plain memory.
    let file = include_bytes!(concat!(
        env!("CARGO_MANIFEST_DIR"),
        "/../../testdata/zidane.jpg"
    ))
    .to_vec();
    let jpeg_src = crate::load_image(&file, Some(PixelFormat::Rgba), None).unwrap();
    let mem_src = TensorDyn::image(
        src_w,
        src_h,
        PixelFormat::Rgba,
        DType::U8,
        Some(TensorMemory::Mem),
    )
    .unwrap();
    let (result, _jpeg_src, mem_src) = convert_img(
        &mut CPUProcessor::new(),
        jpeg_src,
        mem_src,
        Rotation::None,
        Flip::None,
        Crop::no_crop(),
    );
    result.unwrap();
    // Upload into the PBO; scoped so both mappings are dropped before the
    // GL convert below touches the PBO.
    {
        let src_data = mem_src.as_u8().unwrap().map().unwrap();
        let mut pbo_map = pbo_src.as_u8().unwrap().map().unwrap();
        pbo_map.copy_from_slice(&src_data);
    }
    let pbo_dst = converter
        .create_image(dst_w, dst_h, PixelFormat::Rgba, DType::U8, None)
        .unwrap();
    assert_eq!(pbo_dst.as_u8().unwrap().memory(), TensorMemory::Pbo);
    let mut pbo_dst = pbo_dst;
    // PBO-to-PBO conversion through the full ImageProcessor front end.
    let result = converter.convert(
        &pbo_src,
        &mut pbo_dst,
        Rotation::None,
        Flip::None,
        Crop::no_crop(),
    );
    result.unwrap();
    // CPU reference resize of the same in-memory source.
    let cpu_dst = TensorDyn::image(
        dst_w,
        dst_h,
        PixelFormat::Rgba,
        DType::U8,
        Some(TensorMemory::Mem),
    )
    .unwrap();
    let (result, _mem_src, cpu_dst) = convert_img(
        &mut CPUProcessor::new(),
        mem_src,
        cpu_dst,
        Rotation::None,
        Flip::None,
        Crop::no_crop(),
    );
    result.unwrap();
    // Re-tag the PBO output as an RGBA image so compare_images accepts it.
    let pbo_dst_img = {
        let mut __t = pbo_dst.into_u8().unwrap();
        __t.set_format(PixelFormat::Rgba).unwrap();
        TensorDyn::from(__t)
    };
    compare_images(&pbo_dst_img, &cpu_dst, 0.95, function!());
    log::info!("test_convert_pbo_to_pbo: PASS — PBO-to-PBO convert matches CPU reference");
}
#[test]
// BGRA allocation in plain memory: geometry and the 4-channel format
// round-trip through TensorDyn::image.
fn test_image_bgra() {
    let img = TensorDyn::image(
        640,
        480,
        PixelFormat::Bgra,
        DType::U8,
        Some(edgefirst_tensor::TensorMemory::Mem),
    )
    .unwrap();
    let format = img.format().unwrap();
    assert_eq!(format, PixelFormat::Bgra);
    assert_eq!(format.channels(), 4);
    assert_eq!(img.height(), Some(480));
    assert_eq!(img.width(), Some(640));
}
/// Serializes the three tests below. They all mutate the process-global
/// EDGEFIRST_FORCE_BACKEND variable, and the default test harness runs
/// tests on multiple threads, so unsynchronized set/remove calls can race
/// and make the save/restore logic observe another test's value.
static FORCE_BACKEND_ENV_LOCK: std::sync::Mutex<()> = std::sync::Mutex::new(());
#[test]
/// Forcing the CPU backend yields a processor with the CPU path enabled and
/// the forced backend recorded as Cpu.
fn test_force_backend_cpu() {
    // Recover the guard if a previous test panicked while holding it.
    let _guard = FORCE_BACKEND_ENV_LOCK
        .lock()
        .unwrap_or_else(|poisoned| poisoned.into_inner());
    let original = std::env::var("EDGEFIRST_FORCE_BACKEND").ok();
    // SAFETY: environment mutation is serialized by FORCE_BACKEND_ENV_LOCK
    // across all tests that touch this variable.
    unsafe { std::env::set_var("EDGEFIRST_FORCE_BACKEND", "cpu") };
    let result = ImageProcessor::new();
    // Restore the previous value before asserting so a failed assertion
    // cannot leak the override into later tests.
    match original {
        Some(s) => unsafe { std::env::set_var("EDGEFIRST_FORCE_BACKEND", s) },
        None => unsafe { std::env::remove_var("EDGEFIRST_FORCE_BACKEND") },
    }
    let converter = result.unwrap();
    assert!(converter.cpu.is_some());
    assert_eq!(converter.forced_backend, Some(ForcedBackend::Cpu));
}
#[test]
/// An unrecognized backend name must fail construction with
/// ForcedBackendUnavailable mentioning "unknown".
fn test_force_backend_invalid() {
    let _guard = FORCE_BACKEND_ENV_LOCK
        .lock()
        .unwrap_or_else(|poisoned| poisoned.into_inner());
    let original = std::env::var("EDGEFIRST_FORCE_BACKEND").ok();
    // SAFETY: serialized by FORCE_BACKEND_ENV_LOCK (see above).
    unsafe { std::env::set_var("EDGEFIRST_FORCE_BACKEND", "invalid") };
    let result = ImageProcessor::new();
    match original {
        Some(s) => unsafe { std::env::set_var("EDGEFIRST_FORCE_BACKEND", s) },
        None => unsafe { std::env::remove_var("EDGEFIRST_FORCE_BACKEND") },
    }
    assert!(
        matches!(&result, Err(Error::ForcedBackendUnavailable(s)) if s.contains("unknown")),
        "invalid backend value should return ForcedBackendUnavailable error: {result:?}"
    );
}
#[test]
/// With the variable unset, no backend is forced.
fn test_force_backend_unset() {
    let _guard = FORCE_BACKEND_ENV_LOCK
        .lock()
        .unwrap_or_else(|poisoned| poisoned.into_inner());
    let original = std::env::var("EDGEFIRST_FORCE_BACKEND").ok();
    // SAFETY: serialized by FORCE_BACKEND_ENV_LOCK (see above).
    unsafe { std::env::remove_var("EDGEFIRST_FORCE_BACKEND") };
    let result = ImageProcessor::new();
    match original {
        Some(s) => unsafe { std::env::set_var("EDGEFIRST_FORCE_BACKEND", s) },
        None => unsafe { std::env::remove_var("EDGEFIRST_FORCE_BACKEND") },
    }
    let converter = result.unwrap();
    assert!(converter.forced_backend.is_none());
}
// With every backend disabled via env vars, ImageProcessor::new() is still
// expected to succeed, but draw_proto_masks has no CPU fallback and must
// return an Internal error naming the CPU backend rather than panicking.
// NOTE(review): these env vars are process-global and are mutated here
// without the lock used by with_force_backend; this can race with other
// env-mutating tests when the harness runs them in parallel — confirm the
// harness serializes these, or extend the lock to cover the DISABLE vars.
#[test]
fn test_draw_proto_masks_no_cpu_returns_error() {
// Save the current values so they can be restored after construction.
let original_cpu = std::env::var("EDGEFIRST_DISABLE_CPU").ok();
unsafe { std::env::set_var("EDGEFIRST_DISABLE_CPU", "1") };
let original_gl = std::env::var("EDGEFIRST_DISABLE_GL").ok();
unsafe { std::env::set_var("EDGEFIRST_DISABLE_GL", "1") };
let original_g2d = std::env::var("EDGEFIRST_DISABLE_G2D").ok();
unsafe { std::env::set_var("EDGEFIRST_DISABLE_G2D", "1") };
let result = ImageProcessor::new();
// Restore the environment before any assertion below can panic.
match original_cpu {
Some(s) => unsafe { std::env::set_var("EDGEFIRST_DISABLE_CPU", s) },
None => unsafe { std::env::remove_var("EDGEFIRST_DISABLE_CPU") },
}
match original_gl {
Some(s) => unsafe { std::env::set_var("EDGEFIRST_DISABLE_GL", s) },
None => unsafe { std::env::remove_var("EDGEFIRST_DISABLE_GL") },
}
match original_g2d {
Some(s) => unsafe { std::env::set_var("EDGEFIRST_DISABLE_G2D", s) },
None => unsafe { std::env::remove_var("EDGEFIRST_DISABLE_G2D") },
}
let mut converter = result.unwrap();
assert!(converter.cpu.is_none(), "CPU should be disabled");
// Plain-memory RGBA destination for the mask draw attempt.
let dst = TensorDyn::image(
640,
480,
PixelFormat::Rgba,
DType::U8,
Some(TensorMemory::Mem),
)
.unwrap();
let mut dst_dyn = dst;
// One detection in normalized coordinates to drive the proto-mask path.
let det = [DetectBox {
bbox: edgefirst_decoder::BoundingBox {
xmin: 0.1,
ymin: 0.1,
xmax: 0.5,
ymax: 0.5,
},
score: 0.9,
label: 0,
}];
// Minimal proto data: 4 coefficients against an 8x8x4 prototype tensor.
let proto_data = {
use edgefirst_tensor::{Tensor, TensorDyn};
let coeff_t = Tensor::<f32>::from_slice(&[0.5_f32; 4], &[1, 4]).unwrap();
let protos_t =
Tensor::<f32>::from_slice(&vec![0.0_f32; 8 * 8 * 4], &[8, 8, 4]).unwrap();
ProtoData {
mask_coefficients: TensorDyn::F32(coeff_t),
protos: TensorDyn::F32(protos_t),
}
};
let result =
converter.draw_proto_masks(&mut dst_dyn, &det, &proto_data, Default::default());
// The error message is expected to mention the missing "CPU backend".
assert!(
matches!(&result, Err(Error::Internal(s)) if s.contains("CPU backend")),
"draw_proto_masks without CPU should return Internal error: {result:?}"
);
}
#[test]
fn test_draw_proto_masks_cpu_fallback_works() {
    // Force the CPU backend through the locked helper so this test cannot
    // race other tests that mutate EDGEFIRST_FORCE_BACKEND in parallel.
    let mut converter = with_force_backend(Some("cpu"), ImageProcessor::new).unwrap();
    assert!(converter.cpu.is_some());
    // Small plain-memory RGBA destination for the proto-mask draw.
    let mut dst_dyn = TensorDyn::image(
        64,
        64,
        PixelFormat::Rgba,
        DType::U8,
        Some(TensorMemory::Mem),
    )
    .unwrap();
    // One detection in normalized coordinates.
    let det = [DetectBox {
        bbox: edgefirst_decoder::BoundingBox {
            xmin: 0.1,
            ymin: 0.1,
            xmax: 0.5,
            ymax: 0.5,
        },
        score: 0.9,
        label: 0,
    }];
    // Minimal proto data: 4 coefficients against an 8x8x4 prototype tensor.
    let proto_data = {
        use edgefirst_tensor::{Tensor, TensorDyn};
        let coeff_t = Tensor::<f32>::from_slice(&[0.5_f32; 4], &[1, 4]).unwrap();
        let protos_t =
            Tensor::<f32>::from_slice(&vec![0.0_f32; 8 * 8 * 4], &[8, 8, 4]).unwrap();
        ProtoData {
            mask_coefficients: TensorDyn::F32(coeff_t),
            protos: TensorDyn::F32(protos_t),
        }
    };
    // With a CPU backend present, the draw must succeed.
    let result =
        converter.draw_proto_masks(&mut dst_dyn, &det, &proto_data, Default::default());
    assert!(result.is_ok(), "CPU fallback path should work: {result:?}");
}
/// Runs `body` with EDGEFIRST_FORCE_BACKEND set to `value` (or removed when
/// `None`), restoring the previous value afterwards.
///
/// A process-wide mutex serializes all callers, because the environment is
/// global state shared by every test thread. The restore is performed by a
/// drop guard so the original value comes back even if `body` panics —
/// previously a panicking body leaked the forced value into later tests.
fn with_force_backend<R>(value: Option<&str>, body: impl FnOnce() -> R) -> R {
    use std::sync::{Mutex, MutexGuard, OnceLock};
    static LOCK: OnceLock<Mutex<()>> = OnceLock::new();
    // A poisoned lock only means a previous test panicked mid-body; the
    // guard below restored the env var, so the state is still usable.
    let _guard: MutexGuard<()> = LOCK
        .get_or_init(|| Mutex::new(()))
        .lock()
        .unwrap_or_else(|e| e.into_inner());
    // Restores the saved value on drop, including during unwinding.
    struct Restore(Option<String>);
    impl Drop for Restore {
        fn drop(&mut self) {
            match self.0.take() {
                Some(s) => unsafe { std::env::set_var("EDGEFIRST_FORCE_BACKEND", s) },
                None => unsafe { std::env::remove_var("EDGEFIRST_FORCE_BACKEND") },
            }
        }
    }
    let _restore = Restore(std::env::var("EDGEFIRST_FORCE_BACKEND").ok());
    match value {
        Some(v) => unsafe { std::env::set_var("EDGEFIRST_FORCE_BACKEND", v) },
        None => unsafe { std::env::remove_var("EDGEFIRST_FORCE_BACKEND") },
    }
    // `_restore` drops (restoring the env) before `_guard` releases the lock.
    body()
}
/// Allocates a w x h RGBA/U8 destination and fills it with a non-zero
/// rolling byte pattern, so any stale content left by a draw is detectable.
fn make_dirty_dst(w: usize, h: usize, mem: Option<TensorMemory>) -> TensorDyn {
    let dirty = TensorDyn::image(w, h, PixelFormat::Rgba, DType::U8, mem).unwrap();
    {
        use edgefirst_tensor::TensorMapTrait;
        let view = dirty.as_u8().unwrap();
        let mut mapped = view.map().unwrap();
        for (idx, byte) in mapped.as_mut_slice().iter_mut().enumerate() {
            *byte = 0xA0u8.wrapping_add((idx as u8) & 0x3F);
        }
    }
    dirty
}
/// Builds a w x h RGBA/U8 image where every pixel carries `rgba`.
fn make_bg(w: usize, h: usize, mem: Option<TensorMemory>, rgba: [u8; 4]) -> TensorDyn {
    let background = TensorDyn::image(w, h, PixelFormat::Rgba, DType::U8, mem).unwrap();
    {
        use edgefirst_tensor::TensorMapTrait;
        let view = background.as_u8().unwrap();
        let mut mapped = view.map().unwrap();
        for px in mapped.as_mut_slice().chunks_exact_mut(4) {
            px.copy_from_slice(&rgba);
        }
    }
    background
}
/// Reads back the RGBA quad at (x, y), assuming a tightly packed
/// 4-byte-per-pixel row layout (offset = (y * width + x) * 4).
fn pixel_at(dst: &TensorDyn, x: usize, y: usize) -> [u8; 4] {
    use edgefirst_tensor::TensorMapTrait;
    let width = dst.width().unwrap();
    let base = (y * width + x) * 4;
    let view = dst.as_u8().unwrap();
    let mapped = view.map().unwrap();
    let bytes = mapped.as_slice();
    let mut px = [0u8; 4];
    px.copy_from_slice(&bytes[base..base + 4]);
    px
}
/// Asserts every RGBA quad in `dst` equals `expected`, failing with the
/// index of the first mismatching pixel.
fn assert_every_pixel_eq(dst: &TensorDyn, expected: [u8; 4], case: &str) {
    use edgefirst_tensor::TensorMapTrait;
    let view = dst.as_u8().unwrap();
    let mapped = view.map().unwrap();
    let pixels = mapped.as_slice().chunks_exact(4);
    for (i, chunk) in pixels.enumerate() {
        assert_eq!(
            chunk, &expected,
            "{case}: pixel idx {i} = {chunk:?}, expected {expected:?}"
        );
    }
}
/// With no detections and no background, both mask entry points must clear
/// the (deliberately dirtied) destination to transparent black.
fn scenario_empty_no_bg(processor: &mut ImageProcessor, case: &str) {
    let mut canvas = make_dirty_dst(64, 64, None);
    processor
        .draw_decoded_masks(&mut canvas, &[], &[], MaskOverlay::default())
        .unwrap_or_else(|e| panic!("{case}/decoded_masks empty+no-bg failed: {e:?}"));
    assert_every_pixel_eq(&canvas, [0, 0, 0, 0], &format!("{case}/decoded"));
    // Same check for the proto-mask path, with minimal all-zero proto data.
    let mut canvas = make_dirty_dst(64, 64, None);
    let proto_data = {
        use edgefirst_tensor::{Tensor, TensorDyn};
        let coefficients = Tensor::<f32>::from_slice(&[0.0_f32; 4], &[1, 4]).unwrap();
        let prototypes =
            Tensor::<f32>::from_slice(&vec![0.0_f32; 8 * 8 * 4], &[8, 8, 4]).unwrap();
        ProtoData {
            mask_coefficients: TensorDyn::F32(coefficients),
            protos: TensorDyn::F32(prototypes),
        }
    };
    processor
        .draw_proto_masks(&mut canvas, &[], &proto_data, MaskOverlay::default())
        .unwrap_or_else(|e| panic!("{case}/proto_masks empty+no-bg failed: {e:?}"));
    assert_every_pixel_eq(&canvas, [0, 0, 0, 0], &format!("{case}/proto"));
}
/// With no detections but a background supplied, both mask entry points
/// must blit the background over the dirtied destination.
fn scenario_empty_with_bg(processor: &mut ImageProcessor, case: &str) {
    let bg_color = [42, 99, 200, 255];
    let background = make_bg(64, 64, None, bg_color);
    let overlay = MaskOverlay::new().with_background(&background);
    let mut canvas = make_dirty_dst(64, 64, None);
    processor
        .draw_decoded_masks(&mut canvas, &[], &[], overlay)
        .unwrap_or_else(|e| panic!("{case}/decoded_masks empty+bg failed: {e:?}"));
    assert_every_pixel_eq(&canvas, bg_color, &format!("{case}/decoded bg blit"));
    // Same check for the proto-mask path, with minimal all-zero proto data.
    let mut canvas = make_dirty_dst(64, 64, None);
    let proto_data = {
        use edgefirst_tensor::{Tensor, TensorDyn};
        let coefficients = Tensor::<f32>::from_slice(&[0.0_f32; 4], &[1, 4]).unwrap();
        let prototypes =
            Tensor::<f32>::from_slice(&vec![0.0_f32; 8 * 8 * 4], &[8, 8, 4]).unwrap();
        ProtoData {
            mask_coefficients: TensorDyn::F32(coefficients),
            protos: TensorDyn::F32(prototypes),
        }
    };
    processor
        .draw_proto_masks(&mut canvas, &[], &proto_data, overlay)
        .unwrap_or_else(|e| panic!("{case}/proto_masks empty+bg failed: {e:?}"));
    assert_every_pixel_eq(&canvas, bg_color, &format!("{case}/proto bg blit"));
}
// One detection with a full-coverage 4x4 segmentation patch, no background:
// the mask colour must land inside the box while the rest is cleared.
fn scenario_detect_no_bg(processor: &mut ImageProcessor, case: &str) {
use edgefirst_decoder::Segmentation;
use ndarray::Array3;
// Single class, painted with an opaque colour.
processor
.set_class_colors(&[[200, 80, 40, 255]])
.expect("set_class_colors");
// Box covers the central half of the image in normalized coordinates.
let detect = DetectBox {
bbox: [0.25, 0.25, 0.75, 0.75].into(),
score: 0.99,
label: 0,
};
// 4x4x1 mask with every cell set: the whole box interior is masked.
let seg_arr = Array3::from_shape_fn((4, 4, 1), |_| 255u8);
let seg = Segmentation {
segmentation: seg_arr,
xmin: 0.25,
ymin: 0.25,
xmax: 0.75,
ymax: 0.75,
};
let mut dst = make_dirty_dst(64, 64, None);
processor
.draw_decoded_masks(&mut dst, &[detect], &[seg], MaskOverlay::default())
.unwrap_or_else(|e| panic!("{case}/decoded_masks detect+no-bg failed: {e:?}"));
// Outside the box (2,2): must have been cleared, not left dirty.
let corner = pixel_at(&dst, 2, 2);
assert_eq!(
corner,
[0, 0, 0, 0],
"{case}/decoded: corner (2,2) leaked dirty pattern: {corner:?}"
);
// Inside the box (32,32): must have received some mask colour.
let center = pixel_at(&dst, 32, 32);
assert!(
center != [0, 0, 0, 0],
"{case}/decoded: center (32,32) was not coloured: {center:?}"
);
}
// One detection plus a solid background: the background must show outside
// the box and the mask colour must differ from it inside the box.
fn scenario_detect_with_bg(processor: &mut ImageProcessor, case: &str) {
use edgefirst_decoder::Segmentation;
use ndarray::Array3;
// Single class, painted with an opaque colour distinct from the bg.
processor
.set_class_colors(&[[200, 80, 40, 255]])
.expect("set_class_colors");
let bg_color = [10, 20, 30, 255];
let bg = make_bg(64, 64, None, bg_color);
// Box covers the central half of the image in normalized coordinates.
let detect = DetectBox {
bbox: [0.25, 0.25, 0.75, 0.75].into(),
score: 0.99,
label: 0,
};
// 4x4x1 mask with every cell set: the whole box interior is masked.
let seg_arr = Array3::from_shape_fn((4, 4, 1), |_| 255u8);
let seg = Segmentation {
segmentation: seg_arr,
xmin: 0.25,
ymin: 0.25,
xmax: 0.75,
ymax: 0.75,
};
let overlay = MaskOverlay::new().with_background(&bg);
let mut dst = make_dirty_dst(64, 64, None);
processor
.draw_decoded_masks(&mut dst, &[detect], &[seg], overlay)
.unwrap_or_else(|e| panic!("{case}/decoded_masks detect+bg failed: {e:?}"));
// Outside the box (2,2): background must be visible.
let corner = pixel_at(&dst, 2, 2);
assert_eq!(
corner, bg_color,
"{case}/decoded: corner (2,2) should show bg {bg_color:?} got {corner:?}"
);
// Inside the box (32,32): mask overlay must have altered the pixel.
let center = pixel_at(&dst, 32, 32);
assert!(
center != bg_color,
"{case}/decoded: center (32,32) should differ from bg {bg_color:?}, got {center:?}"
);
}
/// Drives the four overlay scenarios against one processor built with the
/// given forced backend. Skips (never fails) when DMA is required but
/// unavailable, or when the backend cannot be initialized on this host.
fn run_all_scenarios(
    force_backend: Option<&'static str>,
    case: &'static str,
    require_dma_for_bg: bool,
) {
    if require_dma_for_bg && !edgefirst_tensor::is_dma_available() {
        eprintln!("SKIPPED: {case} — DMA not available on this host");
        return;
    }
    let mut processor = match with_force_backend(force_backend, ImageProcessor::new) {
        Ok(p) => p,
        Err(e) => {
            eprintln!("SKIPPED: {case} — backend init failed: {e:?}");
            return;
        }
    };
    // All four scenarios reuse the same processor instance.
    scenario_empty_no_bg(&mut processor, case);
    scenario_empty_with_bg(&mut processor, case);
    scenario_detect_no_bg(&mut processor, case);
    scenario_detect_with_bg(&mut processor, case);
}
// Forced CPU backend; none of the scenarios need DMA here.
#[test]
fn test_draw_masks_4_scenarios_cpu() {
run_all_scenarios(Some("cpu"), "cpu", false);
}
// Automatic backend selection (EDGEFIRST_FORCE_BACKEND removed for the run).
#[test]
fn test_draw_masks_4_scenarios_auto() {
run_all_scenarios(None, "auto", false);
}
// Forced OpenGL backend; Linux + "opengl" feature only. run_all_scenarios
// skips internally if GL initialization fails on this host.
#[cfg(target_os = "linux")]
#[cfg(feature = "opengl")]
#[test]
fn test_draw_masks_4_scenarios_opengl() {
run_all_scenarios(Some("opengl"), "opengl", false);
}
// Forces the G2D backend (requires DMA buffers) and checks three cases:
// 1) empty draw clears a dirty destination, 2) empty draw with a background
// blits it, 3) a draw with detections is rejected with NotImplemented.
// Skips (never fails) when DMA or G2D init is unavailable on this host.
#[cfg(target_os = "linux")]
#[test]
fn test_draw_masks_zero_detection_g2d_forced() {
if !edgefirst_tensor::is_dma_available() {
eprintln!("SKIPPED: g2d forced — DMA not available on this host");
return;
}
let processor_result = with_force_backend(Some("g2d"), ImageProcessor::new);
let mut processor = match processor_result {
Ok(p) => p,
Err(e) => {
eprintln!("SKIPPED: g2d forced — init failed: {e:?}");
return;
}
};
// Case 1: dirty DMA destination, no detections, no background → cleared.
let mut dst = TensorDyn::image(
64,
64,
PixelFormat::Rgba,
DType::U8,
Some(TensorMemory::Dma),
)
.unwrap();
{
use edgefirst_tensor::TensorMapTrait;
let u8t = dst.as_u8_mut().unwrap();
let mut map = u8t.map().unwrap();
// Fill with a sentinel byte so a missing clear is detectable.
map.as_mut_slice().fill(0xBB);
}
processor
.draw_decoded_masks(&mut dst, &[], &[], MaskOverlay::default())
.expect("g2d empty+no-bg");
assert_every_pixel_eq(&dst, [0, 0, 0, 0], "g2d/case1 cleared");
// Case 2: solid-colour DMA background must be blitted over a dirty dst.
let bg_color = [7, 11, 13, 255];
let bg = {
let t = TensorDyn::image(
64,
64,
PixelFormat::Rgba,
DType::U8,
Some(TensorMemory::Dma),
)
.unwrap();
{
use edgefirst_tensor::TensorMapTrait;
let u8t = t.as_u8().unwrap();
let mut map = u8t.map().unwrap();
for chunk in map.as_mut_slice().chunks_exact_mut(4) {
chunk.copy_from_slice(&bg_color);
}
}
t
};
let mut dst = TensorDyn::image(
64,
64,
PixelFormat::Rgba,
DType::U8,
Some(TensorMemory::Dma),
)
.unwrap();
{
use edgefirst_tensor::TensorMapTrait;
let u8t = dst.as_u8_mut().unwrap();
let mut map = u8t.map().unwrap();
// Different sentinel from case 1 to rule out leftover state.
map.as_mut_slice().fill(0x55);
}
processor
.draw_decoded_masks(&mut dst, &[], &[], MaskOverlay::new().with_background(&bg))
.expect("g2d empty+bg");
assert_every_pixel_eq(&dst, bg_color, "g2d/case2 bg blit");
// Case 3: G2D does not implement mask drawing with detections present;
// the call must fail with NotImplemented rather than drawing garbage.
let detect = DetectBox {
bbox: [0.25, 0.25, 0.75, 0.75].into(),
score: 0.9,
label: 0,
};
let mut dst = TensorDyn::image(
64,
64,
PixelFormat::Rgba,
DType::U8,
Some(TensorMemory::Dma),
)
.unwrap();
let err = processor
.draw_decoded_masks(&mut dst, &[detect], &[], MaskOverlay::default())
.expect_err("g2d must reject detect-present draw_decoded_masks");
assert!(
matches!(err, Error::NotImplemented(_)),
"g2d case3 wrong error: {err:?}"
);
}
#[test]
fn test_set_format_then_cpu_convert() {
    // Build a CPU-only processor via the locked helper instead of mutating
    // EDGEFIRST_FORCE_BACKEND directly, so this test cannot race other
    // env-mutating tests running in parallel.
    let mut processor = with_force_backend(Some("cpu"), ImageProcessor::new).unwrap();
    // Decode a real JPEG fixture into an RGBA source image.
    let image = include_bytes!(concat!(
        env!("CARGO_MANIFEST_DIR"),
        "/../../testdata/zidane.jpg"
    ));
    let src = load_image(image, Some(PixelFormat::Rgba), None).unwrap();
    // A raw 640x640x3 tensor becomes a convert target once set_format
    // attaches a pixel format to it.
    let mut dst =
        TensorDyn::new(&[640, 640, 3], DType::U8, Some(TensorMemory::Mem), None).unwrap();
    dst.set_format(PixelFormat::Rgb).unwrap();
    processor
        .convert(&src, &mut dst, Rotation::None, Flip::None, Crop::default())
        .unwrap();
    // Format and geometry must survive the conversion.
    assert_eq!(dst.format(), Some(PixelFormat::Rgb));
    assert_eq!(dst.width(), Some(640));
    assert_eq!(dst.height(), Some(640));
}
#[test]
fn test_multiple_image_processors_same_thread() {
    // Several processors must coexist on one thread, each completing an
    // independent 128x128 -> 64x64 RGB downscale.
    let mut pool: Vec<ImageProcessor> = (0..4)
        .map(|_| ImageProcessor::new().expect("ImageProcessor::new() failed"))
        .collect();
    for p in pool.iter_mut() {
        let src = p
            .create_image(128, 128, PixelFormat::Rgb, DType::U8, None)
            .expect("create src failed");
        let mut dst = p
            .create_image(64, 64, PixelFormat::Rgb, DType::U8, None)
            .expect("create dst failed");
        p.convert(&src, &mut dst, Rotation::None, Flip::None, Crop::default())
            .expect("convert failed");
        assert_eq!(dst.width(), Some(64));
        assert_eq!(dst.height(), Some(64));
    }
}
// Four threads each build their own ImageProcessor and run one convert.
// The work runs on a watchdog thread: if a backend wedges during init or
// convert, the channel recv times out instead of hanging the test binary.
#[test]
fn test_multiple_image_processors_separate_threads() {
use std::sync::mpsc;
use std::time::Duration;
const TIMEOUT: Duration = Duration::from_secs(60);
let (tx, rx) = mpsc::channel::<()>();
std::thread::spawn(move || {
let handles: Vec<_> = (0..4)
.map(|i| {
std::thread::spawn(move || {
let mut proc = ImageProcessor::new().unwrap_or_else(|e| {
panic!("ImageProcessor::new() failed on thread {i}: {e}")
});
let src = proc
.create_image(128, 128, PixelFormat::Rgb, DType::U8, None)
.unwrap_or_else(|e| panic!("create src failed on thread {i}: {e}"));
let mut dst = proc
.create_image(64, 64, PixelFormat::Rgb, DType::U8, None)
.unwrap_or_else(|e| panic!("create dst failed on thread {i}: {e}"));
proc.convert(&src, &mut dst, Rotation::None, Flip::None, Crop::default())
.unwrap_or_else(|e| panic!("convert failed on thread {i}: {e}"));
assert_eq!(dst.width(), Some(64));
assert_eq!(dst.height(), Some(64));
})
})
.collect();
// Join all workers so any worker panic surfaces with its thread index.
for (i, h) in handles.into_iter().enumerate() {
h.join()
.unwrap_or_else(|e| panic!("thread {i} panicked: {e:?}"));
}
// Signal completion; a dropped tx (watchdog panic) also unblocks recv.
let _ = tx.send(());
});
rx.recv_timeout(TIMEOUT).unwrap_or_else(|_| {
panic!("test_multiple_image_processors_separate_threads timed out after {TIMEOUT:?}")
});
}
// Stress test: N worker threads each own a processor and run ROUNDS
// create+convert cycles after a barrier-synchronized start, so the convert
// loops genuinely overlap in time. A watchdog thread plus channel timeout
// prevents a wedged backend from hanging the whole test binary.
#[test]
fn test_image_processors_concurrent_operations() {
use std::sync::{mpsc, Arc, Barrier};
use std::time::Duration;
const N: usize = 4;
const ROUNDS: usize = 10;
const TIMEOUT: Duration = Duration::from_secs(60);
let (tx, rx) = mpsc::channel::<()>();
std::thread::spawn(move || {
// Workers construct their processor first, then wait here, so the
// subsequent convert loops run concurrently.
let barrier = Arc::new(Barrier::new(N));
let handles: Vec<_> = (0..N)
.map(|i| {
let barrier = Arc::clone(&barrier);
std::thread::spawn(move || {
let mut proc = ImageProcessor::new().unwrap_or_else(|e| {
panic!("ImageProcessor::new() failed on thread {i}: {e}")
});
barrier.wait();
for round in 0..ROUNDS {
let src = proc
.create_image(128, 128, PixelFormat::Rgb, DType::U8, None)
.unwrap_or_else(|e| {
panic!("create src failed on thread {i} round {round}: {e}")
});
let mut dst = proc
.create_image(64, 64, PixelFormat::Rgb, DType::U8, None)
.unwrap_or_else(|e| {
panic!("create dst failed on thread {i} round {round}: {e}")
});
proc.convert(
&src,
&mut dst,
Rotation::None,
Flip::None,
Crop::default(),
)
.unwrap_or_else(|e| {
panic!("convert failed on thread {i} round {round}: {e}")
});
assert_eq!(dst.width(), Some(64));
assert_eq!(dst.height(), Some(64));
}
})
})
.collect();
// Join all workers so any panic surfaces with thread and round info.
for (i, h) in handles.into_iter().enumerate() {
h.join()
.unwrap_or_else(|e| panic!("thread {i} panicked: {e:?}"));
}
let _ = tx.send(());
});
rx.recv_timeout(TIMEOUT).unwrap_or_else(|_| {
panic!("test_image_processors_concurrent_operations timed out after {TIMEOUT:?}")
});
}
}