use core::arch::wasm32::*;
use crate::convolution::optimisations::Normalizer16;
use crate::pixels::U8x3;
use crate::{wasm32_utils, ImageView, ImageViewMut};
#[inline]
/// Applies horizontal convolution to `src_view`, writing results into
/// `dst_view`, using `u8` pixels and `i16` fixed-point coefficients from
/// `normalizer`. Rows are processed in groups of four (better SIMD register
/// utilization in the kernel), then the remaining `height % 4` rows are
/// handled one at a time. `offset` is the first source row to read.
pub(crate) fn horiz_convolution(
    src_view: &impl ImageView<Pixel = U8x3>,
    dst_view: &mut impl ImageViewMut<Pixel = U8x3>,
    offset: u32,
    normalizer: &Normalizer16,
) {
    let dst_height = dst_view.height();

    // Main pass: convolve four rows per iteration.
    let quad_src = src_view.iter_4_rows(offset, dst_height + offset);
    let quad_dst = dst_view.iter_4_rows_mut();
    for (src_quad, dst_quad) in quad_src.zip(quad_dst) {
        // SAFETY: kernel requires the simd128 target feature and matching
        // row lengths, both guaranteed by the view iterators.
        unsafe {
            horiz_convolution_four_rows(src_quad, dst_quad, normalizer);
        }
    }

    // Tail pass: the last `dst_height % 4` rows, one by one.
    let tail_start = dst_height - dst_height % 4;
    let tail_src = src_view.iter_rows(tail_start + offset);
    let tail_dst = dst_view.iter_rows_mut(tail_start);
    for (src_row, dst_row) in tail_src.zip(tail_dst) {
        // SAFETY: same contract as above, single-row variant.
        unsafe {
            horiz_convolution_one_row(src_row, dst_row, normalizer);
        }
    }
}
#[inline]
#[target_feature(enable = "simd128")]
/// Convolves one output column range across four source rows at once.
///
/// For every destination pixel `dst_x`, accumulates
/// `sum(src_pixel[x] * coeff)` per RGB channel in 32-bit fixed point
/// (coefficients are `i16` with `normalizer.precision()` fractional bits),
/// then shifts, saturates to `u8` and stores the result.
///
/// # Safety
/// - Requires the `simd128` target feature.
/// - `src_rows` / `dst_rows` must be rows of the same image pair;
///   `dst_rows[i]` must have at least `normalizer.chunks().len()` pixels
///   (`get_unchecked_mut(dst_x)` relies on this).
unsafe fn horiz_convolution_four_rows(
    src_rows: [&[U8x3]; 4],
    dst_rows: [&mut [U8x3]; 4],
    normalizer: &Normalizer16,
) {
    const ZERO: v128 = i64x2(0, 0);
    let precision = normalizer.precision() as u32;
    // Rounding bias: adding half of the divisor before the final
    // arithmetic shift implements round-half-up.
    let initial = i32x4_splat(1 << (precision - 1));
    let src_width = src_rows[0].len();

    // Swizzle masks interleave bytes of two adjacent RGB pixels into i16
    // lanes: [R0,R1, G0,G1, B0,B1, 0,0] (a -1 index yields a zero byte, so
    // each u8 is zero-extended to i16). With coefficients duplicated as
    // pairs, i32x4_dot_i16x8 then produces per-channel
    // `p0*c0 + p1*c1` sums in the low three i32 lanes.
    // SH_LO covers pixels 0-1 (bytes 0..=5) of a 16-byte load...
    #[rustfmt::skip]
    const SH_LO: v128 = i8x16(
        0, -1, 3, -1, 1, -1, 4, -1, 2, -1, 5, -1, -1, -1, -1, -1
    );
    // ...and SH_HI covers pixels 2-3 (bytes 6..=11).
    #[rustfmt::skip]
    const SH_HI: v128 = i8x16(
        6, -1, 9, -1, 7, -1, 10, -1, 8, -1, 11, -1, -1, -1, -1, -1
    );

    for (dst_x, coeffs_chunk) in normalizer.chunks().iter().enumerate() {
        let x_start = coeffs_chunk.start as usize;
        let mut x = x_start;
        // One i32x4 accumulator per row; lanes 0..3 hold R, G, B, unused.
        let mut sss_a = [initial; 4];
        let mut coeffs = coeffs_chunk.values();

        // A 16-byte load at pixel x touches bytes 3x..=3x+15, i.e. up to
        // pixel x+5, so it is in bounds only while x < width - 5.
        let max_x = src_width.saturating_sub(5);
        if x < max_x {
            // Process coefficients four at a time (four pixels per load).
            let coeffs_by_4 = coeffs.chunks_exact(4);
            for k in coeffs_by_4 {
                // Coefficient pairs (k0,k1) and (k2,k3), each pair
                // broadcast as one i32 across all lanes.
                let mmk0 = wasm32_utils::ptr_i16_to_set1_i32(k, 0);
                let mmk1 = wasm32_utils::ptr_i16_to_set1_i32(k, 2);
                for i in 0..4 {
                    let source = wasm32_utils::load_v128(src_rows[i], x);
                    let pix = i8x16_swizzle(source, SH_LO);
                    let mut sss = sss_a[i];
                    sss = i32x4_add(sss, i32x4_dot_i16x8(pix, mmk0));
                    let pix = i8x16_swizzle(source, SH_HI);
                    sss_a[i] = i32x4_add(sss, i32x4_dot_i16x8(pix, mmk1));
                }
                x += 4;
                // Stop before the next 16-byte load would overrun the row.
                if x >= max_x {
                    break;
                }
            }
        }

        // An 8-byte load at pixel x touches up to pixel x+2, so it is
        // in bounds only while x < width - 2.
        let max_x = src_width.saturating_sub(2);
        if x < max_x {
            // Resume from wherever the 4-wide loop stopped,
            // two coefficients (two pixels) at a time.
            let coeffs_by_2 = coeffs[x - x_start..].chunks_exact(2);
            for k in coeffs_by_2 {
                let mmk = wasm32_utils::ptr_i16_to_set1_i32(k, 0);
                for i in 0..4 {
                    let source = wasm32_utils::loadl_i64(src_rows[i], x);
                    let pix = i8x16_swizzle(source, SH_LO);
                    sss_a[i] = i32x4_add(sss_a[i], i32x4_dot_i16x8(pix, mmk));
                }
                x += 2;
                if x >= max_x {
                    break;
                }
            }
        }

        // Scalar tail: remaining coefficients, one pixel at a time
        // (safe single-pixel load, no overread possible).
        coeffs = coeffs.split_at(x - x_start).1;
        for &k in coeffs {
            let mmk = i32x4_splat(k as i32);
            for i in 0..4 {
                let pix = wasm32_utils::i32x4_extend_low_ptr_u8x3(src_rows[i], x);
                sss_a[i] = i32x4_add(sss_a[i], i32x4_dot_i16x8(pix, mmk));
            }
            x += 1;
        }

        // Drop the fractional bits (arithmetic shift keeps the sign for
        // negative lobes of e.g. Lanczos filters).
        sss_a[0] = i32x4_shr(sss_a[0], precision);
        sss_a[1] = i32x4_shr(sss_a[1], precision);
        sss_a[2] = i32x4_shr(sss_a[2], precision);
        sss_a[3] = i32x4_shr(sss_a[3], precision);
        for i in 0..4 {
            // Saturating narrow i32 -> i16 -> u8; lane 0 of the result then
            // holds the packed [R, G, B, x] bytes of the output pixel.
            let sss = i16x8_narrow_i32x4(sss_a[i], ZERO);
            let pixel: u32 =
                i32::cast_unsigned(i32x4_extract_lane::<0>(u8x16_narrow_i16x8(sss, ZERO)));
            let bytes = pixel.to_le_bytes();
            dst_rows[i].get_unchecked_mut(dst_x).0 = [bytes[0], bytes[1], bytes[2]];
        }
    }
}
#[inline]
#[target_feature(enable = "simd128")]
/// Single-row variant of the horizontal convolution kernel: same
/// fixed-point per-channel accumulation as `horiz_convolution_four_rows`,
/// used for the `height % 4` leftover rows.
///
/// # Safety
/// - Requires the `simd128` target feature.
/// - `dst_row` must have at least `normalizer.chunks().len()` pixels
///   (`get_unchecked_mut(dst_x)` relies on this).
unsafe fn horiz_convolution_one_row(
    src_row: &[U8x3],
    dst_row: &mut [U8x3],
    normalizer: &Normalizer16,
) {
    // Pixel swizzles: interleave bytes of two adjacent RGB pixels into
    // zero-extended i16 lanes [R0,R1, G0,G1, B0,B1, 0,0] (-1 => zero byte).
    // PIX_SH1 covers pixels 0-1 (bytes 0..=5) of a 16-byte load...
    #[rustfmt::skip]
    const PIX_SH1: v128 = i8x16(
        0, -1, 3, -1, 1, -1, 4, -1, 2, -1, 5, -1, -1, -1, -1, -1
    );
    // Coefficient swizzle: broadcast the first i16 pair (bytes 0..=3,
    // coeffs c0,c1) into every i32 lane, matching PIX_SH1's pixel pairs.
    #[rustfmt::skip]
    const COEF_SH1: v128 = i8x16(
        0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3
    );
    // ...PIX_SH2 covers pixels 2-3 (bytes 6..=11)...
    #[rustfmt::skip]
    const PIX_SH2: v128 = i8x16(
        6, -1, 9, -1, 7, -1, 10, -1, 8, -1, 11, -1, -1, -1, -1, -1
    );
    // ...paired with the second coefficient pair (bytes 4..=7, coeffs c2,c3).
    #[rustfmt::skip]
    const COEF_SH2: v128 = i8x16(
        4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7
    );

    let precision = normalizer.precision() as u32;
    let src_width = src_row.len();
    // Rounding bias (half of the divisor) for round-half-up on the
    // final arithmetic shift.
    let initial = i32x4_splat(1 << (precision - 1));

    for (dst_x, coeffs_chunk) in normalizer.chunks().iter().enumerate() {
        let x_start = coeffs_chunk.start as usize;
        let mut x = x_start;
        let mut coeffs = coeffs_chunk.values();
        // i32x4 accumulator; lanes 0..3 hold R, G, B, unused.
        let mut sss = initial;

        // A 16-byte pixel load at x reaches up to pixel x+5, so it is
        // in bounds only while x < width - 5.
        let max_x = src_width.saturating_sub(5);
        if x < max_x {
            // Four coefficients / four pixels per iteration.
            let coeffs_by_4 = coeffs.chunks_exact(4);
            for k in coeffs_by_4 {
                // Load four i16 coefficients (8 bytes) at once, then
                // broadcast pairs of them via the COEF_* swizzles.
                let ksource = wasm32_utils::loadl_i64(k, 0);
                let source = wasm32_utils::load_v128(src_row, x);
                // Pixels 0-1 with (c0,c1): dot yields per-channel
                // p0*c0 + p1*c1 in i32 lanes.
                let pix = i8x16_swizzle(source, PIX_SH1);
                let mmk = i8x16_swizzle(ksource, COEF_SH1);
                sss = i32x4_add(sss, i32x4_dot_i16x8(pix, mmk));
                // Pixels 2-3 with (c2,c3).
                let pix = i8x16_swizzle(source, PIX_SH2);
                let mmk = i8x16_swizzle(ksource, COEF_SH2);
                sss = i32x4_add(sss, i32x4_dot_i16x8(pix, mmk));
                x += 4;
                // Stop before the next 16-byte load would overrun the row.
                if x >= max_x {
                    break;
                }
            }
        }

        // An 8-byte load at x reaches up to pixel x+2, so it is
        // in bounds only while x < width - 2.
        let max_x = src_width.saturating_sub(2);
        if x < max_x {
            // Resume from wherever the 4-wide loop stopped,
            // two coefficients / two pixels at a time.
            let coeffs_by_2 = coeffs[x - x_start..].chunks_exact(2);
            for k in coeffs_by_2 {
                let mmk = wasm32_utils::ptr_i16_to_set1_i32(k, 0);
                let source = wasm32_utils::loadl_i64(src_row, x);
                let pix = i8x16_swizzle(source, PIX_SH1);
                sss = i32x4_add(sss, i32x4_dot_i16x8(pix, mmk));
                x += 2;
                if x >= max_x {
                    break;
                }
            }
        }

        // Scalar tail: one pixel per coefficient (safe single-pixel load).
        coeffs = coeffs.split_at(x - x_start).1;
        for &k in coeffs {
            let pix = wasm32_utils::i32x4_extend_low_ptr_u8x3(src_row, x);
            let mmk = i32x4_splat(k as i32);
            sss = i32x4_add(sss, i32x4_dot_i16x8(pix, mmk));
            x += 1;
        }

        // Drop fractional bits (arithmetic shift preserves sign for
        // negative filter lobes), then saturate i32 -> i16 -> u8.
        sss = i32x4_shr(sss, precision);
        sss = i16x8_narrow_i32x4(sss, sss);
        // Lane 0 of the narrowed vector holds the packed [R, G, B, x] bytes.
        let pixel: u32 = i32::cast_unsigned(i32x4_extract_lane::<0>(u8x16_narrow_i16x8(sss, sss)));
        let bytes = pixel.to_le_bytes();
        dst_row.get_unchecked_mut(dst_x).0 = [bytes[0], bytes[1], bytes[2]];
    }
}