use core::arch::wasm32::*;
use core::mem::transmute;
use crate::convolution::optimisations::Normalizer16;
use crate::pixels::U8x4;
use crate::{wasm32_utils, ImageView, ImageViewMut};
#[inline]
pub(crate) fn horiz_convolution(
    src_view: &impl ImageView<Pixel = U8x4>,
    dst_view: &mut impl ImageViewMut<Pixel = U8x4>,
    offset: u32,
    normalizer: &Normalizer16,
) {
    let dst_height = dst_view.height();

    // Main pass: process rows in groups of four so the SIMD kernel can
    // amortize coefficient loads across four accumulators.
    let quad_src = src_view.iter_4_rows(offset, dst_height + offset);
    let quad_dst = dst_view.iter_4_rows_mut();
    for (src_quad, dst_quad) in quad_src.zip(quad_dst) {
        // SAFETY: callee is gated on `target_feature(enable = "simd128")`;
        // this module is only compiled/used for wasm32 with simd128.
        unsafe {
            horiz_convolution_four_rows(src_quad, dst_quad, normalizer);
        }
    }

    // Tail pass: handle the 0..=3 leftover rows one at a time.
    let tail_start = dst_height - dst_height % 4;
    let tail_src = src_view.iter_rows(tail_start + offset);
    let tail_dst = dst_view.iter_rows_mut(tail_start);
    for (src_row, dst_row) in tail_src.zip(tail_dst) {
        // SAFETY: same simd128 requirement as above.
        unsafe {
            horiz_convolution_one_row(src_row, dst_row, normalizer);
        }
    }
}
/// Horizontal convolution of four source rows at once.
///
/// For every destination pixel, multiplies neighbouring source pixels by
/// 16-bit fixed-point coefficients, accumulating per-channel sums in the
/// four i32 lanes of `sss*`, then shifts right by the normalizer precision
/// and narrows to `u8` with saturation.
///
/// Fix vs. previous revision: the former `MASK` constant was byte-for-byte
/// identical to `MASK_LO`, so it was removed and `MASK_LO` is used in the
/// two-coefficient loop as well.
///
/// # Safety
/// - Requires the `simd128` target feature (declared below).
/// - For every `coeffs_chunk`, `coeffs_chunk.start + coeffs len` must stay
///   within the bounds of each row in `src_rows` (unchecked SIMD loads), and
///   `normalizer.chunks().len()` must not exceed the length of each row in
///   `dst_rows` (`get_unchecked_mut` writes).
#[target_feature(enable = "simd128")]
unsafe fn horiz_convolution_four_rows(
    src_rows: [&[U8x4]; 4],
    dst_rows: [&mut [U8x4]; 4],
    normalizer: &Normalizer16,
) {
    let precision = normalizer.precision() as u32;
    // Rounding bias of 0.5 in fixed point so the final right shift rounds
    // to nearest instead of truncating.
    let initial = i32x4_splat(1 << (precision - 1));
    // Swizzle masks that spread u8 channel bytes into the low byte of each
    // i16 lane (index -1 selects zero, giving a free zero-extension).
    // MASK_LO interleaves the channels of the first two pixels of a 16-byte
    // (4-pixel) load; MASK_HI those of the last two pixels. MASK_LO is also
    // valid for 8-byte (2-pixel) loads, where only bytes 0..8 are defined.
    const MASK_LO: v128 = i8x16(0, -1, 4, -1, 1, -1, 5, -1, 2, -1, 6, -1, 3, -1, 7, -1);
    const MASK_HI: v128 = i8x16(8, -1, 12, -1, 9, -1, 13, -1, 10, -1, 14, -1, 11, -1, 15, -1);
    for (dst_x, coeffs_chunk) in normalizer.chunks().iter().enumerate() {
        let mut x: usize = coeffs_chunk.start as usize;
        let mut sss0 = initial;
        let mut sss1 = initial;
        let mut sss2 = initial;
        let mut sss3 = initial;
        let coeffs = coeffs_chunk.values();
        // Consume coefficients four at a time (4 pixels per iteration).
        let coeffs_by_4 = coeffs.chunks_exact(4);
        let reminder1 = coeffs_by_4.remainder();
        for k in coeffs_by_4 {
            // Broadcast coefficient pairs (k0,k1) and (k2,k3) as i32 lanes;
            // i32x4_dot_i16x8 then computes k0*c_even + k1*c_odd per lane.
            let mmk_lo = wasm32_utils::ptr_i16_to_set1_i32(k, 0);
            let mmk_hi = wasm32_utils::ptr_i16_to_set1_i32(k, 2);
            let mut source = wasm32_utils::load_v128(src_rows[0], x);
            let mut pix = i8x16_swizzle(source, MASK_LO);
            sss0 = i32x4_add(sss0, i32x4_dot_i16x8(pix, mmk_lo));
            pix = i8x16_swizzle(source, MASK_HI);
            sss0 = i32x4_add(sss0, i32x4_dot_i16x8(pix, mmk_hi));
            source = wasm32_utils::load_v128(src_rows[1], x);
            pix = i8x16_swizzle(source, MASK_LO);
            sss1 = i32x4_add(sss1, i32x4_dot_i16x8(pix, mmk_lo));
            pix = i8x16_swizzle(source, MASK_HI);
            sss1 = i32x4_add(sss1, i32x4_dot_i16x8(pix, mmk_hi));
            source = wasm32_utils::load_v128(src_rows[2], x);
            pix = i8x16_swizzle(source, MASK_LO);
            sss2 = i32x4_add(sss2, i32x4_dot_i16x8(pix, mmk_lo));
            pix = i8x16_swizzle(source, MASK_HI);
            sss2 = i32x4_add(sss2, i32x4_dot_i16x8(pix, mmk_hi));
            source = wasm32_utils::load_v128(src_rows[3], x);
            pix = i8x16_swizzle(source, MASK_LO);
            sss3 = i32x4_add(sss3, i32x4_dot_i16x8(pix, mmk_lo));
            pix = i8x16_swizzle(source, MASK_HI);
            sss3 = i32x4_add(sss3, i32x4_dot_i16x8(pix, mmk_hi));
            x += 4;
        }
        // Leftover coefficients, two at a time (2 pixels, 8-byte loads).
        let coeffs_by_2 = reminder1.chunks_exact(2);
        let reminder2 = coeffs_by_2.remainder();
        for k in coeffs_by_2 {
            let mmk = wasm32_utils::ptr_i16_to_set1_i32(k, 0);
            let mut pix = wasm32_utils::loadl_i64(src_rows[0], x);
            pix = i8x16_swizzle(pix, MASK_LO);
            sss0 = i32x4_add(sss0, i32x4_dot_i16x8(pix, mmk));
            pix = wasm32_utils::loadl_i64(src_rows[1], x);
            pix = i8x16_swizzle(pix, MASK_LO);
            sss1 = i32x4_add(sss1, i32x4_dot_i16x8(pix, mmk));
            pix = wasm32_utils::loadl_i64(src_rows[2], x);
            pix = i8x16_swizzle(pix, MASK_LO);
            sss2 = i32x4_add(sss2, i32x4_dot_i16x8(pix, mmk));
            pix = wasm32_utils::loadl_i64(src_rows[3], x);
            pix = i8x16_swizzle(pix, MASK_LO);
            sss3 = i32x4_add(sss3, i32x4_dot_i16x8(pix, mmk));
            x += 2;
        }
        // A possible final single coefficient (1 pixel).
        if let Some(&k) = reminder2.first() {
            let mmk = i32x4_splat(k as i32);
            let mut pix = wasm32_utils::i32x4_extend_low_ptr_u8x4(src_rows[0], x);
            sss0 = i32x4_add(sss0, i32x4_dot_i16x8(pix, mmk));
            pix = wasm32_utils::i32x4_extend_low_ptr_u8x4(src_rows[1], x);
            sss1 = i32x4_add(sss1, i32x4_dot_i16x8(pix, mmk));
            pix = wasm32_utils::i32x4_extend_low_ptr_u8x4(src_rows[2], x);
            sss2 = i32x4_add(sss2, i32x4_dot_i16x8(pix, mmk));
            pix = wasm32_utils::i32x4_extend_low_ptr_u8x4(src_rows[3], x);
            sss3 = i32x4_add(sss3, i32x4_dot_i16x8(pix, mmk));
        }
        // Drop the fixed-point fraction, then narrow i32 -> i16 -> u8 with
        // saturation; lane 0 of the final vector holds the 4 output bytes.
        sss0 = i32x4_shr(sss0, precision);
        sss1 = i32x4_shr(sss1, precision);
        sss2 = i32x4_shr(sss2, precision);
        sss3 = i32x4_shr(sss3, precision);
        sss0 = i16x8_narrow_i32x4(sss0, sss0);
        sss1 = i16x8_narrow_i32x4(sss1, sss1);
        sss2 = i16x8_narrow_i32x4(sss2, sss2);
        sss3 = i16x8_narrow_i32x4(sss3, sss3);
        *dst_rows[0].get_unchecked_mut(dst_x) =
            transmute(i32x4_extract_lane::<0>(u8x16_narrow_i16x8(sss0, sss0)));
        *dst_rows[1].get_unchecked_mut(dst_x) =
            transmute(i32x4_extract_lane::<0>(u8x16_narrow_i16x8(sss1, sss1)));
        *dst_rows[2].get_unchecked_mut(dst_x) =
            transmute(i32x4_extract_lane::<0>(u8x16_narrow_i16x8(sss2, sss2)));
        *dst_rows[3].get_unchecked_mut(dst_x) =
            transmute(i32x4_extract_lane::<0>(u8x16_narrow_i16x8(sss3, sss3)));
    }
}
/// Horizontal convolution of a single source row.
///
/// Same fixed-point scheme as the four-row kernel: per-channel products are
/// accumulated in the i32 lanes of `sss`, then shifted right by the
/// normalizer precision and narrowed to `u8` with saturation.
///
/// # Safety
/// - Requires the `simd128` target feature (declared below).
/// - For every `coeffs_chunk`, `coeffs_chunk.start + coeffs len` must stay
///   within `src_row` bounds (unchecked SIMD loads), and
///   `normalizer.chunks().len()` must not exceed `dst_row.len()`
///   (`get_unchecked_mut` write).
#[target_feature(enable = "simd128")]
unsafe fn horiz_convolution_one_row(
    src_row: &[U8x4],
    dst_row: &mut [U8x4],
    normalizer: &Normalizer16,
) {
    let precision = normalizer.precision() as u32;
    // Rounding bias of 0.5 in fixed point so the final shift rounds to nearest.
    let initial = i32x4_splat(1 << (precision - 1));
    // Pixel swizzles (-1 lanes select zero => free zero-extension to i16):
    // SH1 interleaves the channels of pixels 0 and 2 of a 4-pixel load,
    // SH3 those of pixels 1 and 3 — pairing pixels whose coefficients are
    // broadcast together below.
    const SH1: v128 = i8x16(0, -1, 8, -1, 1, -1, 9, -1, 2, -1, 10, -1, 3, -1, 11, -1);
    // Coefficient swizzles: broadcast one i16 coefficient pair from the
    // loaded coefficient vector across all four i32 lanes.
    // SH2 -> coeffs (0,2), SH4 -> (1,3), SH5 -> (4,6), SH6 -> (5,7).
    const SH2: v128 = i8x16(0, 1, 4, 5, 0, 1, 4, 5, 0, 1, 4, 5, 0, 1, 4, 5);
    const SH3: v128 = i8x16(4, -1, 12, -1, 5, -1, 13, -1, 6, -1, 14, -1, 7, -1, 15, -1);
    const SH4: v128 = i8x16(2, 3, 6, 7, 2, 3, 6, 7, 2, 3, 6, 7, 2, 3, 6, 7);
    const SH5: v128 = i8x16(8, 9, 12, 13, 8, 9, 12, 13, 8, 9, 12, 13, 8, 9, 12, 13);
    const SH6: v128 = i8x16(
        10, 11, 14, 15, 10, 11, 14, 15, 10, 11, 14, 15, 10, 11, 14, 15,
    );
    // SH7 interleaves the channels of the two pixels of an 8-byte load.
    const SH7: v128 = i8x16(0, -1, 4, -1, 1, -1, 5, -1, 2, -1, 6, -1, 3, -1, 7, -1);
    for (dst_x, coeffs_chunk) in normalizer.chunks().iter().enumerate() {
        let mut x: usize = coeffs_chunk.start as usize;
        let mut sss = initial;
        // Consume coefficients eight at a time (two 4-pixel loads).
        let coeffs_by_8 = coeffs_chunk.values().chunks_exact(8);
        let reminder8 = coeffs_by_8.remainder();
        for k in coeffs_by_8 {
            // One 128-bit load covers all eight i16 coefficients.
            let ksource = wasm32_utils::load_v128(k, 0);
            let mut source = wasm32_utils::load_v128(src_row, x);
            let mut pix = i8x16_swizzle(source, SH1);
            let mut mmk = i8x16_swizzle(ksource, SH2);
            sss = i32x4_add(sss, i32x4_dot_i16x8(pix, mmk));
            pix = i8x16_swizzle(source, SH3);
            mmk = i8x16_swizzle(ksource, SH4);
            sss = i32x4_add(sss, i32x4_dot_i16x8(pix, mmk));
            source = wasm32_utils::load_v128(src_row, x + 4);
            pix = i8x16_swizzle(source, SH1);
            mmk = i8x16_swizzle(ksource, SH5);
            sss = i32x4_add(sss, i32x4_dot_i16x8(pix, mmk));
            pix = i8x16_swizzle(source, SH3);
            mmk = i8x16_swizzle(ksource, SH6);
            sss = i32x4_add(sss, i32x4_dot_i16x8(pix, mmk));
            x += 8;
        }
        // Then four at a time (one 4-pixel load, 8-byte coefficient load).
        let coeffs_by_4 = reminder8.chunks_exact(4);
        let reminder4 = coeffs_by_4.remainder();
        for k in coeffs_by_4 {
            let source = wasm32_utils::load_v128(src_row, x);
            let ksource = wasm32_utils::loadl_i64(k, 0);
            let mut pix = i8x16_swizzle(source, SH1);
            let mut mmk = i8x16_swizzle(ksource, SH2);
            sss = i32x4_add(sss, i32x4_dot_i16x8(pix, mmk));
            pix = i8x16_swizzle(source, SH3);
            mmk = i8x16_swizzle(ksource, SH4);
            sss = i32x4_add(sss, i32x4_dot_i16x8(pix, mmk));
            x += 4;
        }
        // Then two at a time (one 2-pixel, 8-byte load).
        let coeffs_by_2 = reminder4.chunks_exact(2);
        let reminder2 = coeffs_by_2.remainder();
        for k in coeffs_by_2 {
            let mmk = wasm32_utils::ptr_i16_to_set1_i32(k, 0);
            let source = wasm32_utils::loadl_i64(src_row, x);
            let pix = i8x16_swizzle(source, SH7);
            sss = i32x4_add(sss, i32x4_dot_i16x8(pix, mmk));
            x += 2
        }
        // A possible final single coefficient (1 pixel).
        if let Some(&k) = reminder2.first() {
            let pix = wasm32_utils::i32x4_extend_low_ptr_u8x4(src_row, x);
            let mmk = i32x4_splat(k as i32);
            sss = i32x4_add(sss, i32x4_dot_i16x8(pix, mmk));
        }
        // Drop the fixed-point fraction, then narrow i32 -> i16 -> u8 with
        // saturation; lane 0 of the final vector holds the 4 output bytes.
        sss = i32x4_shr(sss, precision);
        sss = i16x8_narrow_i32x4(sss, sss);
        *dst_row.get_unchecked_mut(dst_x) =
            transmute(i32x4_extract_lane::<0>(u8x16_narrow_i16x8(sss, sss)));
    }
}