use crate::internals::ProcessedOffset;
use crate::neon::utils::neon_vld_rgb_for_yuv;
use crate::yuv_support::{CbCrForwardTransform, YuvChromaRange, YuvNVOrder, YuvSourceChannels};
use std::arch::aarch64::*;
/// Fast, reduced-precision NEON kernel converting two rows of interleaved
/// RGB(A) pixels into two rows of Y and one row of interleaved UV
/// (4:2:0 chroma subsampling, NV12/NV21 semi-planar layout).
///
/// "Fast" here means the matrix coefficients are applied as unsigned 8-bit
/// multiplies into 16-bit accumulators with `A_E` (= 7) bits of fractional
/// precision, instead of wider/higher-precision arithmetic.
///
/// * `y_plane0` / `y_plane1` - destination luma rows for the two source rows.
/// * `uv_plane` - destination interleaved chroma row shared by both rows.
/// * `rgba0` / `rgba1` - the two interleaved source rows.
/// * `width` - row width in pixels.
/// * `range` - provides the Y and UV bias values for the target range.
/// * `transform` - forward RGB->YCbCr coefficients, already scaled to the
///   fixed-point precision (NOTE(review): assumed to fit in u8 after
///   `abs()`, and `yr`/`yg`/`yb` assumed non-negative — the casts below
///   truncate otherwise; verify against the transform builder).
/// * `start_cx` / `start_ux` - starting offsets into the luma / chroma rows.
///
/// Returns the updated `cx` (pixels consumed) and `ux` (UV bytes written).
///
/// # Safety
/// Caller must ensure all slices are large enough for `width` pixels at the
/// given channel count and offsets: unchecked indexing and raw-pointer
/// loads/stores are used throughout. aarch64 NEON must be available.
#[allow(clippy::cast_abs_to_unsigned)]
pub(crate) unsafe fn neon_rgbx_to_nv_fast420<const ORIGIN_CHANNELS: u8, const UV_ORDER: u8>(
    y_plane0: &mut [u8],
    y_plane1: &mut [u8],
    uv_plane: &mut [u8],
    rgba0: &[u8],
    rgba1: &[u8],
    width: u32,
    range: &YuvChromaRange,
    transform: &CbCrForwardTransform<i32>,
    start_cx: usize,
    start_ux: usize,
) -> ProcessedOffset {
    let source_channels: YuvSourceChannels = ORIGIN_CHANNELS.into();
    let uv_order: YuvNVOrder = UV_ORDER.into();
    let channels = source_channels.get_channels_count();
    // Fixed-point fractional bits of the approximation.
    const A_E: i32 = 7;
    // Bias pre-scaled into the accumulator domain, plus a rounding term of
    // (1 << (A_E - 1)) - 1 applied before the final >> A_E narrowing shift.
    let y_bias = vdupq_n_u16(range.bias_y as u16 * (1 << A_E) + (1 << (A_E - 1)) - 1);
    let uv_bias = vdupq_n_u16(range.bias_uv as u16 * (1 << A_E) + (1 << (A_E - 1)) - 1);
    let mut cx = start_cx;
    let mut ux = start_ux;
    // Luma coefficients, broadcast as u8 lanes (assumed non-negative).
    let v_yr = vdupq_n_u8(transform.yr as u8);
    let v_yg = vdupq_n_u8(transform.yg as u8);
    let v_yb = vdupq_n_u8(transform.yb as u8);
    // Chroma coefficients: magnitudes only — the sign is encoded by using
    // vmlal (add) for the positive term and vmlsl (subtract) for the
    // negative terms below.
    let v_cb_r = vdup_n_u8(transform.cb_r.abs() as u8);
    let v_cb_g = vdup_n_u8(transform.cb_g.abs() as u8);
    let v_cb_b = vdup_n_u8(transform.cb_b.abs() as u8);
    let v_cr_r = vdup_n_u8(transform.cr_r.abs() as u8);
    let v_cr_g = vdup_n_u8(transform.cr_g.abs() as u8);
    let v_cr_b = vdup_n_u8(transform.cr_b.abs() as u8);
    // Main loop: 16 pixels per row per iteration (both rows in lockstep).
    while cx + 16 <= width as usize {
        let src0 = rgba0.get_unchecked(cx * channels..).as_ptr();
        let src1 = rgba1.get_unchecked(cx * channels..).as_ptr();
        // Deinterleave 16 pixels into separate R/G/B vectors per row.
        let (r_values0, g_values0, b_values0) = neon_vld_rgb_for_yuv::<ORIGIN_CHANNELS>(src0);
        let (r_values1, g_values1, b_values1) = neon_vld_rgb_for_yuv::<ORIGIN_CHANNELS>(src1);
        // Y = bias + yr*R + yg*G + yb*B, accumulated as u8*u8 -> u16
        // widening multiply-adds; low and high halves tracked separately.
        let mut yh0 = vmlal_high_u8(y_bias, r_values0, v_yr);
        let mut yh1 = vmlal_high_u8(y_bias, r_values1, v_yr);
        let mut yl0 = vmlal_u8(y_bias, vget_low_u8(r_values0), vget_low_u8(v_yr));
        let mut yl1 = vmlal_u8(y_bias, vget_low_u8(r_values1), vget_low_u8(v_yr));
        yh0 = vmlal_high_u8(yh0, g_values0, v_yg);
        yh1 = vmlal_high_u8(yh1, g_values1, v_yg);
        yl0 = vmlal_u8(yl0, vget_low_u8(g_values0), vget_low_u8(v_yg));
        yl1 = vmlal_u8(yl1, vget_low_u8(g_values1), vget_low_u8(v_yg));
        yh0 = vmlal_high_u8(yh0, b_values0, v_yb);
        yh1 = vmlal_high_u8(yh1, b_values1, v_yb);
        yl0 = vmlal_u8(yl0, vget_low_u8(b_values0), vget_low_u8(v_yb));
        yl1 = vmlal_u8(yl1, vget_low_u8(b_values1), vget_low_u8(v_yb));
        // Saturating narrow with >> A_E drops the fixed-point fraction.
        let yn_0 = vqshrn_n_u16::<A_E>(yl0);
        let yn_1 = vqshrn_n_u16::<A_E>(yh0);
        let yn_2 = vqshrn_n_u16::<A_E>(yl1);
        let yn_3 = vqshrn_n_u16::<A_E>(yh1);
        let y_vl0 = vcombine_u8(yn_0, yn_1);
        let y_vl1 = vcombine_u8(yn_2, yn_3);
        vst1q_u8(y_plane0.get_unchecked_mut(cx..).as_mut_ptr(), y_vl0);
        vst1q_u8(y_plane1.get_unchecked_mut(cx..).as_mut_ptr(), y_vl1);
        // 2x2 chroma averaging: halving-add between the two rows (vertical
        // average), pairwise widening add (horizontal sum of neighbours),
        // then >> 1 to finish the horizontal average. Result: 8 averaged
        // R/G/B samples for the 16 input pixels.
        let rhv = vhaddq_u8(r_values0, r_values1);
        let ghv = vhaddq_u8(g_values0, g_values1);
        let bhv = vhaddq_u8(b_values0, b_values1);
        let rpv = vpaddlq_u8(rhv);
        let gpv = vpaddlq_u8(ghv);
        let bpv = vpaddlq_u8(bhv);
        let rpv = vqshrn_n_u16::<1>(rpv);
        let gpv = vqshrn_n_u16::<1>(gpv);
        let bpv = vqshrn_n_u16::<1>(bpv);
        // Cb = bias + cb_b*B - cb_r*R - cb_g*G; Cr = bias + cr_r*R - cr_g*G - cr_b*B.
        let mut cb_q = vmlal_u8(uv_bias, bpv, v_cb_b);
        let mut cr_q = vmlal_u8(uv_bias, rpv, v_cr_r);
        cb_q = vmlsl_u8(cb_q, rpv, v_cb_r);
        cr_q = vmlsl_u8(cr_q, gpv, v_cr_g);
        cb_q = vmlsl_u8(cb_q, gpv, v_cb_g);
        cr_q = vmlsl_u8(cr_q, bpv, v_cr_b);
        let mut cb = vqshrn_n_u16::<A_E>(cb_q);
        let mut cr = vqshrn_n_u16::<A_E>(cr_q);
        // NV21 stores V before U: swap the two planes before interleaving.
        if uv_order == YuvNVOrder::VU {
            std::mem::swap(&mut cb, &mut cr);
        }
        // Interleaved store of 8 (U, V) pairs = 16 bytes.
        vst2_u8(
            uv_plane.get_unchecked_mut(ux..).as_mut_ptr(),
            uint8x8x2_t(cb, cr),
        );
        ux += 16;
        cx += 16;
    }
    // Tail (< 16 remaining pixels): run the identical SIMD path on padded
    // stack buffers, then copy only the valid prefix back out.
    if cx < width as usize {
        let diff = width as usize - cx;
        assert!(diff <= 16);
        // 16 pixels x up to 4 channels of zero-initialized scratch input.
        let mut src_buffer0: [u8; 16 * 4] = [0; 16 * 4];
        let mut src_buffer1: [u8; 16 * 4] = [0; 16 * 4];
        let mut y_buffer0: [u8; 16] = [0; 16];
        let mut y_buffer1: [u8; 16] = [0; 16];
        let mut uv_buffer: [u8; 32] = [0; 32];
        std::ptr::copy_nonoverlapping(
            rgba0.get_unchecked(cx * channels..).as_ptr(),
            src_buffer0.as_mut_ptr().cast(),
            diff * channels,
        );
        std::ptr::copy_nonoverlapping(
            rgba1.get_unchecked(cx * channels..).as_ptr(),
            src_buffer1.as_mut_ptr().cast(),
            diff * channels,
        );
        // Odd pixel count: duplicate the last real pixel into the slot after
        // it so the 2x1 horizontal chroma pair averages the pixel with
        // itself instead of with zero padding.
        if diff % 2 != 0 {
            let lst = (width as usize - 1) * channels;
            let last_items0 = rgba0.get_unchecked(lst..(lst + channels));
            let last_items1 = rgba1.get_unchecked(lst..(lst + channels));
            let dvb = diff * channels;
            let dst0 = src_buffer0.get_unchecked_mut(dvb..(dvb + channels));
            let dst1 = src_buffer1.get_unchecked_mut(dvb..(dvb + channels));
            for (dst, src) in dst0.iter_mut().zip(last_items0) {
                *dst = *src;
            }
            for (dst, src) in dst1.iter_mut().zip(last_items1) {
                *dst = *src;
            }
        }
        let (r_values0, g_values0, b_values0) =
            neon_vld_rgb_for_yuv::<ORIGIN_CHANNELS>(src_buffer0.as_ptr().cast());
        let (r_values1, g_values1, b_values1) =
            neon_vld_rgb_for_yuv::<ORIGIN_CHANNELS>(src_buffer1.as_ptr().cast());
        // Same luma accumulation as the main loop (see comments above).
        let mut yh0 = vmlal_high_u8(y_bias, r_values0, v_yr);
        let mut yh1 = vmlal_high_u8(y_bias, r_values1, v_yr);
        let mut yl0 = vmlal_u8(y_bias, vget_low_u8(r_values0), vget_low_u8(v_yr));
        let mut yl1 = vmlal_u8(y_bias, vget_low_u8(r_values1), vget_low_u8(v_yr));
        yh0 = vmlal_high_u8(yh0, g_values0, v_yg);
        yh1 = vmlal_high_u8(yh1, g_values1, v_yg);
        yl0 = vmlal_u8(yl0, vget_low_u8(g_values0), vget_low_u8(v_yg));
        yl1 = vmlal_u8(yl1, vget_low_u8(g_values1), vget_low_u8(v_yg));
        yh0 = vmlal_high_u8(yh0, b_values0, v_yb);
        yh1 = vmlal_high_u8(yh1, b_values1, v_yb);
        yl0 = vmlal_u8(yl0, vget_low_u8(b_values0), vget_low_u8(v_yb));
        yl1 = vmlal_u8(yl1, vget_low_u8(b_values1), vget_low_u8(v_yb));
        let yn_0 = vqshrn_n_u16::<A_E>(yl0);
        let yn_1 = vqshrn_n_u16::<A_E>(yh0);
        let yn_2 = vqshrn_n_u16::<A_E>(yl1);
        let yn_3 = vqshrn_n_u16::<A_E>(yh1);
        let y_vl0 = vcombine_u8(yn_0, yn_1);
        let y_vl1 = vcombine_u8(yn_2, yn_3);
        vst1q_u8(y_buffer0.as_mut_ptr().cast(), y_vl0);
        vst1q_u8(y_buffer1.as_mut_ptr().cast(), y_vl1);
        // Same 2x2 chroma averaging and Cb/Cr accumulation as the main loop.
        let rhv = vhaddq_u8(r_values0, r_values1);
        let ghv = vhaddq_u8(g_values0, g_values1);
        let bhv = vhaddq_u8(b_values0, b_values1);
        let rpv = vpaddlq_u8(rhv);
        let gpv = vpaddlq_u8(ghv);
        let bpv = vpaddlq_u8(bhv);
        let rpv = vqshrn_n_u16::<1>(rpv);
        let gpv = vqshrn_n_u16::<1>(gpv);
        let bpv = vqshrn_n_u16::<1>(bpv);
        let mut cb_q = vmlal_u8(uv_bias, bpv, v_cb_b);
        let mut cr_q = vmlal_u8(uv_bias, rpv, v_cr_r);
        cb_q = vmlsl_u8(cb_q, rpv, v_cb_r);
        cr_q = vmlsl_u8(cr_q, gpv, v_cr_g);
        cb_q = vmlsl_u8(cb_q, gpv, v_cb_g);
        cr_q = vmlsl_u8(cr_q, bpv, v_cr_b);
        let mut cb = vqshrn_n_u16::<A_E>(cb_q);
        let mut cr = vqshrn_n_u16::<A_E>(cr_q);
        if uv_order == YuvNVOrder::VU {
            std::mem::swap(&mut cb, &mut cr);
        }
        vst2_u8(uv_buffer.as_mut_ptr().cast(), uint8x8x2_t(cb, cr));
        // Copy back only the `diff` valid luma bytes per row.
        std::ptr::copy_nonoverlapping(
            y_buffer0.as_ptr().cast(),
            y_plane0.get_unchecked_mut(cx..).as_mut_ptr(),
            diff,
        );
        std::ptr::copy_nonoverlapping(
            y_buffer1.as_ptr().cast(),
            y_plane1.get_unchecked_mut(cx..).as_mut_ptr(),
            diff,
        );
        // ceil(diff / 2) UV pairs, 2 bytes each.
        let hv = diff.div_ceil(2) * 2;
        std::ptr::copy_nonoverlapping(
            uv_buffer.as_ptr().cast(),
            uv_plane.get_unchecked_mut(ux..).as_mut_ptr(),
            hv,
        );
        cx += diff;
        ux += hv;
    }
    ProcessedOffset { cx, ux }
}