use yuv::{
BufferStoreMut, YuvChromaSubsampling, YuvConversionMode, YuvPlanarImageMut, YuvRange,
YuvStandardMatrix, rgb_to_yuv420, rgb_to_yuv444,
};
/// Converts tightly packed RGB888 pixels into full-range BT.601 YCbCr
/// (4:4:4), writing each plane as `f32` samples in `[0, 255]`.
///
/// All three output planes are written tightly packed (`width * height`
/// samples each).
#[allow(dead_code)]
pub fn rgb_to_ycbcr_fast(
    rgb_data: &[u8],
    y_plane: &mut [f32],
    cb_plane: &mut [f32],
    cr_plane: &mut [f32],
    width: usize,
    height: usize,
) {
    let num_pixels = width * height;
    debug_assert!(rgb_data.len() >= num_pixels * 3);
    debug_assert!(y_plane.len() >= num_pixels);
    debug_assert!(cb_plane.len() >= num_pixels);
    debug_assert!(cr_plane.len() >= num_pixels);

    // Run the conversion in u8 space via the `yuv` crate, then widen.
    let mut yuv_image =
        YuvPlanarImageMut::alloc(width as u32, height as u32, YuvChromaSubsampling::Yuv444);
    rgb_to_yuv444(
        &mut yuv_image,
        rgb_data,
        width as u32 * 3,
        YuvRange::Full,
        YuvStandardMatrix::Bt601,
        YuvConversionMode::Professional,
    )
    .expect("yuv conversion failed");

    // Widen each u8 plane to f32 for the caller.
    let y_src = yuv_image.y_plane.borrow();
    let cb_src = yuv_image.u_plane.borrow();
    let cr_src = yuv_image.v_plane.borrow();
    for (dst, &src) in y_plane[..num_pixels].iter_mut().zip(&y_src[..num_pixels]) {
        *dst = src as f32;
    }
    for (dst, &src) in cb_plane[..num_pixels].iter_mut().zip(&cb_src[..num_pixels]) {
        *dst = src as f32;
    }
    for (dst, &src) in cr_plane[..num_pixels].iter_mut().zip(&cr_src[..num_pixels]) {
        *dst = src as f32;
    }
}
/// Converts RGB (`bpp == 3`) or RGBA (`bpp == 4`, alpha dropped) pixels to
/// full-range BT.601 YCbCr 4:4:4.
///
/// The Y plane is written with a row stride of `y_stride` samples; when
/// `y_stride > width`, the tail of every Y row is filled by replicating the
/// row's last sample. Cb and Cr are written tightly packed.
pub fn rgb_to_ycbcr_strided_fast(
    rgb_data: &[u8],
    y_plane: &mut [f32],
    cb_plane: &mut [f32],
    cr_plane: &mut [f32],
    width: usize,
    height: usize,
    y_stride: usize,
    bpp: usize,
) {
    debug_assert!(rgb_data.len() >= width * height * bpp);
    debug_assert!(y_plane.len() >= y_stride * height);
    debug_assert!(cb_plane.len() >= width * height);
    debug_assert!(cr_plane.len() >= width * height);

    // Strip the alpha channel if present so the converter always sees
    // tightly packed RGB triplets.
    let stripped: Vec<u8>;
    let rgb_input: &[u8] = if bpp == 4 {
        stripped = rgb_data
            .chunks_exact(4)
            .take(width * height)
            .flat_map(|px| [px[0], px[1], px[2]])
            .collect();
        &stripped
    } else {
        rgb_data
    };

    let mut yuv_image =
        YuvPlanarImageMut::alloc(width as u32, height as u32, YuvChromaSubsampling::Yuv444);
    rgb_to_yuv444(
        &mut yuv_image,
        rgb_input,
        width as u32 * 3,
        YuvRange::Full,
        YuvStandardMatrix::Bt601,
        YuvConversionMode::Professional,
    )
    .expect("yuv conversion failed");

    let y_u8 = yuv_image.y_plane.borrow();
    let cb_u8 = yuv_image.u_plane.borrow();
    let cr_u8 = yuv_image.v_plane.borrow();

    if y_stride == width {
        // Unpadded layout: a single linear pass covers all three planes.
        let n = width * height;
        for i in 0..n {
            y_plane[i] = y_u8[i] as f32;
            cb_plane[i] = cb_u8[i] as f32;
            cr_plane[i] = cr_u8[i] as f32;
        }
    } else {
        for row in 0..height {
            // Cb/Cr destinations are unpadded, so their row offset equals
            // the source row offset.
            let src = row * width;
            let y_dst = row * y_stride;
            for x in 0..width {
                y_plane[y_dst + x] = y_u8[src + x] as f32;
                cb_plane[src + x] = cb_u8[src + x] as f32;
                cr_plane[src + x] = cr_u8[src + x] as f32;
            }
            // Replicate the last sample into the Y stride padding.
            if width < y_stride {
                let edge = y_plane[y_dst + width - 1];
                y_plane[y_dst + width..y_dst + y_stride].fill(edge);
            }
        }
    }
}
/// Strided RGB(A) → YCbCr 4:4:4 conversion that reuses caller-provided u8
/// scratch planes instead of allocating a planar image per call.
///
/// Accepts RGB (`bpp == 3`) or RGBA (`bpp == 4`, alpha dropped). The Y
/// plane is written with a row stride of `y_stride` samples, stride padding
/// filled by replicating each row's last sample; Cb/Cr are tightly packed.
pub fn rgb_to_ycbcr_strided_reuse(
    rgb_data: &[u8],
    y_plane: &mut [f32],
    cb_plane: &mut [f32],
    cr_plane: &mut [f32],
    yuv_temp_y: &mut [u8],
    yuv_temp_cb: &mut [u8],
    yuv_temp_cr: &mut [u8],
    width: usize,
    height: usize,
    y_stride: usize,
    bpp: usize,
) {
    let num_pixels = width * height;
    debug_assert!(rgb_data.len() >= num_pixels * bpp);
    debug_assert!(y_plane.len() >= y_stride * height);
    debug_assert!(cb_plane.len() >= num_pixels);
    debug_assert!(cr_plane.len() >= num_pixels);
    debug_assert!(yuv_temp_y.len() >= num_pixels);
    debug_assert!(yuv_temp_cb.len() >= num_pixels);
    debug_assert!(yuv_temp_cr.len() >= num_pixels);

    // Drop the alpha channel if present so the converter sees packed RGB.
    let packed: Vec<u8>;
    let rgb_input: &[u8] = if bpp == 4 {
        packed = rgb_data
            .chunks_exact(4)
            .take(num_pixels)
            .flat_map(|px| [px[0], px[1], px[2]])
            .collect();
        &packed
    } else {
        rgb_data
    };

    // Borrow the caller's scratch buffers as the conversion target so no
    // per-call allocation is needed for the planar image.
    let mut yuv_image = YuvPlanarImageMut {
        y_plane: BufferStoreMut::Borrowed(&mut yuv_temp_y[..num_pixels]),
        y_stride: width as u32,
        u_plane: BufferStoreMut::Borrowed(&mut yuv_temp_cb[..num_pixels]),
        u_stride: width as u32,
        v_plane: BufferStoreMut::Borrowed(&mut yuv_temp_cr[..num_pixels]),
        v_stride: width as u32,
        width: width as u32,
        height: height as u32,
    };
    rgb_to_yuv444(
        &mut yuv_image,
        rgb_input,
        width as u32 * 3,
        YuvRange::Full,
        YuvStandardMatrix::Bt601,
        YuvConversionMode::Professional,
    )
    .expect("yuv conversion failed");

    let y_u8 = yuv_image.y_plane.borrow();
    let cb_u8 = yuv_image.u_plane.borrow();
    let cr_u8 = yuv_image.v_plane.borrow();

    if y_stride == width {
        // Unpadded layout: one linear pass over all three planes.
        for i in 0..num_pixels {
            y_plane[i] = y_u8[i] as f32;
            cb_plane[i] = cb_u8[i] as f32;
            cr_plane[i] = cr_u8[i] as f32;
        }
    } else {
        for row in 0..height {
            // Cb/Cr are unpadded, so their destination offset matches the
            // source row offset.
            let src = row * width;
            let y_dst = row * y_stride;
            for x in 0..width {
                y_plane[y_dst + x] = y_u8[src + x] as f32;
                cb_plane[src + x] = cb_u8[src + x] as f32;
                cr_plane[src + x] = cr_u8[src + x] as f32;
            }
            // Edge-replicate into the Y stride padding.
            if width < y_stride {
                let edge = y_plane[y_dst + width - 1];
                y_plane[y_dst + width..y_dst + y_stride].fill(edge);
            }
        }
    }
}
pub fn bgr_to_ycbcr_strided_reuse(
bgr_data: &[u8],
y_plane: &mut [f32],
cb_plane: &mut [f32],
cr_plane: &mut [f32],
yuv_temp_y: &mut [u8],
yuv_temp_cb: &mut [u8],
yuv_temp_cr: &mut [u8],
width: usize,
height: usize,
y_stride: usize,
bpp: usize,
) {
let num_pixels = width * height;
debug_assert!(bgr_data.len() >= num_pixels * bpp);
debug_assert!(y_plane.len() >= y_stride * height);
debug_assert!(cb_plane.len() >= num_pixels);
debug_assert!(cr_plane.len() >= num_pixels);
debug_assert!(yuv_temp_y.len() >= num_pixels);
debug_assert!(yuv_temp_cb.len() >= num_pixels);
debug_assert!(yuv_temp_cr.len() >= num_pixels);
let rgb_converted: Vec<u8> = if bpp == 4 {
bgr_data
.chunks_exact(4)
.take(num_pixels)
.flat_map(|chunk| [chunk[2], chunk[1], chunk[0]]) .collect()
} else {
bgr_data
.chunks_exact(3)
.take(num_pixels)
.flat_map(|chunk| [chunk[2], chunk[1], chunk[0]]) .collect()
};
let mut yuv_image = YuvPlanarImageMut {
y_plane: BufferStoreMut::Borrowed(&mut yuv_temp_y[..num_pixels]),
y_stride: width as u32,
u_plane: BufferStoreMut::Borrowed(&mut yuv_temp_cb[..num_pixels]),
u_stride: width as u32,
v_plane: BufferStoreMut::Borrowed(&mut yuv_temp_cr[..num_pixels]),
v_stride: width as u32,
width: width as u32,
height: height as u32,
};
rgb_to_yuv444(
&mut yuv_image,
&rgb_converted,
width as u32 * 3,
YuvRange::Full,
YuvStandardMatrix::Bt601,
YuvConversionMode::Professional,
)
.expect("yuv conversion failed");
let y_u8 = yuv_image.y_plane.borrow();
let cb_u8 = yuv_image.u_plane.borrow();
let cr_u8 = yuv_image.v_plane.borrow();
if y_stride == width {
for i in 0..num_pixels {
y_plane[i] = y_u8[i] as f32;
cb_plane[i] = cb_u8[i] as f32;
cr_plane[i] = cr_u8[i] as f32;
}
} else {
for row in 0..height {
let src_start = row * width;
let y_dst_start = row * y_stride;
let cbcr_dst_start = row * width;
for x in 0..width {
y_plane[y_dst_start + x] = y_u8[src_start + x] as f32;
cb_plane[cbcr_dst_start + x] = cb_u8[src_start + x] as f32;
cr_plane[cbcr_dst_start + x] = cr_u8[src_start + x] as f32;
}
if width < y_stride {
let edge_val = y_plane[y_dst_start + width - 1];
for x in width..y_stride {
y_plane[y_dst_start + x] = edge_val;
}
}
}
}
}
/// Strided BGR/BGRA → YCbCr 4:4:4 conversion; swaps the channel order and
/// forwards to [`rgb_to_ycbcr_strided_fast`].
///
/// `bpp` selects BGR (`3`) or BGRA (`4`, alpha dropped) input.
pub fn bgr_to_ycbcr_strided_fast(
    bgr_data: &[u8],
    y_plane: &mut [f32],
    cb_plane: &mut [f32],
    cr_plane: &mut [f32],
    width: usize,
    height: usize,
    y_stride: usize,
    bpp: usize,
) {
    let num_pixels = width * height;
    debug_assert!(bgr_data.len() >= num_pixels * bpp);
    debug_assert!(y_plane.len() >= y_stride * height);
    debug_assert!(cb_plane.len() >= num_pixels);
    debug_assert!(cr_plane.len() >= num_pixels);

    // Reorder BGR(A) → RGB, discarding alpha when present.
    let chunk_len = if bpp == 4 { 4 } else { 3 };
    let rgb_data: Vec<u8> = bgr_data
        .chunks_exact(chunk_len)
        .take(num_pixels)
        .flat_map(|px| [px[2], px[1], px[0]])
        .collect();

    rgb_to_ycbcr_strided_fast(
        &rgb_data, y_plane, cb_plane, cr_plane, width, height, y_stride, 3,
    );
}
/// RGB(A) → YCbCr 4:2:0 conversion into caller-provided buffers.
///
/// The Y plane is written at full resolution with a row stride of
/// `y_stride` samples; Cb/Cr are written at half resolution
/// (`ceil(width / 2) x ceil(height / 2)`), tightly packed. Accepts RGB
/// (`bpp == 3`) or RGBA (`bpp == 4`, alpha dropped).
///
/// NOTE: unlike the 4:4:4 strided paths, the Y stride padding is left
/// untouched here.
pub fn rgb_to_ycbcr_420_reuse(
    rgb_data: &[u8],
    y_plane: &mut [f32],
    cb_down: &mut [f32],
    cr_down: &mut [f32],
    yuv_temp_y: &mut [u8],
    yuv_temp_cb: &mut [u8],
    yuv_temp_cr: &mut [u8],
    width: usize,
    height: usize,
    y_stride: usize,
    bpp: usize,
) {
    let num_pixels = width * height;
    // Chroma dimensions round up so odd image sizes are fully covered.
    let c_width = (width + 1) / 2;
    let c_height = (height + 1) / 2;
    let c_size = c_width * c_height;
    debug_assert!(rgb_data.len() >= num_pixels * bpp);
    debug_assert!(y_plane.len() >= y_stride * height);
    debug_assert!(cb_down.len() >= c_size);
    debug_assert!(cr_down.len() >= c_size);
    debug_assert!(yuv_temp_y.len() >= num_pixels);
    debug_assert!(yuv_temp_cb.len() >= c_size);
    debug_assert!(yuv_temp_cr.len() >= c_size);

    // Drop the alpha channel if present so the converter sees packed RGB.
    let packed: Vec<u8>;
    let rgb_input: &[u8] = if bpp == 4 {
        packed = rgb_data
            .chunks_exact(4)
            .take(num_pixels)
            .flat_map(|px| [px[0], px[1], px[2]])
            .collect();
        &packed
    } else {
        rgb_data
    };

    // Target the caller's scratch buffers: full-size Y, half-size chroma.
    let mut yuv_image = YuvPlanarImageMut {
        y_plane: BufferStoreMut::Borrowed(&mut yuv_temp_y[..num_pixels]),
        y_stride: width as u32,
        u_plane: BufferStoreMut::Borrowed(&mut yuv_temp_cb[..c_size]),
        u_stride: c_width as u32,
        v_plane: BufferStoreMut::Borrowed(&mut yuv_temp_cr[..c_size]),
        v_stride: c_width as u32,
        width: width as u32,
        height: height as u32,
    };
    rgb_to_yuv420(
        &mut yuv_image,
        rgb_input,
        width as u32 * 3,
        YuvRange::Full,
        YuvStandardMatrix::Bt601,
        YuvConversionMode::Professional,
    )
    .expect("yuv 420 conversion failed");

    let y_u8 = yuv_image.y_plane.borrow();
    let cb_u8 = yuv_image.u_plane.borrow();
    let cr_u8 = yuv_image.v_plane.borrow();

    // Widen the Y plane to f32, honoring the destination stride.
    if y_stride == width {
        for (dst, &src) in y_plane[..num_pixels].iter_mut().zip(&y_u8[..num_pixels]) {
            *dst = src as f32;
        }
    } else {
        for row in 0..height {
            let src_row = &y_u8[row * width..row * width + width];
            let dst_row = &mut y_plane[row * y_stride..row * y_stride + width];
            for (dst, &src) in dst_row.iter_mut().zip(src_row) {
                *dst = src as f32;
            }
        }
    }
    // Widen the downsampled chroma planes.
    for i in 0..c_size {
        cb_down[i] = cb_u8[i] as f32;
        cr_down[i] = cr_u8[i] as f32;
    }
}
/// Strided BGR/BGRA → YCbCr 4:2:0 conversion reusing caller scratch
/// buffers; swaps the channel order and forwards to
/// [`rgb_to_ycbcr_420_reuse`].
///
/// `bpp` selects BGR (`3`) or BGRA (`4`, alpha dropped) input.
pub fn bgr_to_ycbcr_420_reuse(
    bgr_data: &[u8],
    y_plane: &mut [f32],
    cb_down: &mut [f32],
    cr_down: &mut [f32],
    yuv_temp_y: &mut [u8],
    yuv_temp_cb: &mut [u8],
    yuv_temp_cr: &mut [u8],
    width: usize,
    height: usize,
    y_stride: usize,
    bpp: usize,
) {
    let num_pixels = width * height;
    // Validate the BGR input against the caller-supplied bpp — this check
    // was missing here although every sibling conversion performs it; the
    // remaining buffer sizes are re-checked by the delegate (with bpp == 3).
    debug_assert!(bgr_data.len() >= num_pixels * bpp);

    // Reorder BGR(A) → RGB, discarding alpha when present.
    let rgb_converted: Vec<u8> = if bpp == 4 {
        bgr_data
            .chunks_exact(4)
            .take(num_pixels)
            .flat_map(|chunk| [chunk[2], chunk[1], chunk[0]])
            .collect()
    } else {
        bgr_data
            .chunks_exact(3)
            .take(num_pixels)
            .flat_map(|chunk| [chunk[2], chunk[1], chunk[0]])
            .collect()
    };
    rgb_to_ycbcr_420_reuse(
        &rgb_converted,
        y_plane,
        cb_down,
        cr_down,
        yuv_temp_y,
        yuv_temp_cb,
        yuv_temp_cr,
        width,
        height,
        y_stride,
        3,
    );
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::color::rgb_to_ycbcr_f32;

    /// The u8-based fast path should track the f32 reference conversion to
    /// within rounding error (< 1.5 per channel) on a varied pixel pattern.
    #[test]
    fn test_fast_yuv_matches_f32() {
        let width = 64;
        let height = 64;
        let num_pixels = width * height;

        // Deterministic pattern exercising a spread of RGB values.
        let mut rgb = vec![0u8; num_pixels * 3];
        for (i, px) in rgb.chunks_exact_mut(3).enumerate() {
            px[0] = (i % 256) as u8;
            px[1] = ((i * 7) % 256) as u8;
            px[2] = ((i * 13) % 256) as u8;
        }

        let mut y_fast = vec![0.0f32; num_pixels];
        let mut cb_fast = vec![0.0f32; num_pixels];
        let mut cr_fast = vec![0.0f32; num_pixels];
        rgb_to_ycbcr_fast(&rgb, &mut y_fast, &mut cb_fast, &mut cr_fast, width, height);

        // Track the worst-case per-channel deviation from the reference.
        let mut max_y_diff = 0.0f32;
        let mut max_cb_diff = 0.0f32;
        let mut max_cr_diff = 0.0f32;
        for i in 0..num_pixels {
            let (y_ref, cb_ref, cr_ref) = rgb_to_ycbcr_f32(
                rgb[i * 3] as f32,
                rgb[i * 3 + 1] as f32,
                rgb[i * 3 + 2] as f32,
            );
            max_y_diff = max_y_diff.max((y_fast[i] - y_ref).abs());
            max_cb_diff = max_cb_diff.max((cb_fast[i] - cb_ref).abs());
            max_cr_diff = max_cr_diff.max((cr_fast[i] - cr_ref).abs());
        }
        assert!(max_y_diff < 1.5, "Y diff {} exceeds threshold", max_y_diff);
        assert!(
            max_cb_diff < 1.5,
            "Cb diff {} exceeds threshold",
            max_cb_diff
        );
        assert!(
            max_cr_diff < 1.5,
            "Cr diff {} exceeds threshold",
            max_cr_diff
        );
    }

    /// A mid-gray image through the strided path must yield Y ~ 128 at every
    /// visible pixel, regardless of the stride padding.
    #[test]
    fn test_fast_yuv_strided() {
        let width = 60;
        let height = 4;
        let y_stride = 64;
        let num_pixels = width * height;

        let rgb = vec![128u8; num_pixels * 3];
        let mut y_plane = vec![0.0f32; y_stride * height];
        let mut cb_plane = vec![0.0f32; num_pixels];
        let mut cr_plane = vec![0.0f32; num_pixels];
        rgb_to_ycbcr_strided_fast(
            &rgb,
            &mut y_plane,
            &mut cb_plane,
            &mut cr_plane,
            width,
            height,
            y_stride,
            3,
        );

        // Only the visible (non-padding) samples are asserted on.
        for row in 0..height {
            for x in 0..width {
                let y = y_plane[row * y_stride + x];
                assert!((y - 128.0).abs() < 2.0, "Gray Y={} at ({}, {})", y, x, row);
            }
        }
    }
}