#![cfg(feature = "neon_luts")]
use crate::conversions::LutBarycentricReduction;
use crate::conversions::interpolator::BarycentricWeight;
use crate::conversions::lut_transforms::Lut4x3Factory;
use crate::conversions::neon::interpolator::*;
use crate::conversions::neon::interpolator_q0_15::NeonAlignedI16x4;
use crate::conversions::neon::lut4_to_3_q0_15::TransformLut4To3NeonQ0_15;
use crate::conversions::neon::{NeonAlignedF32, assert_barycentric_lut_size_precondition};
use crate::transform::PointeeSizeExpressible;
use crate::{
BarycentricWeightScale, CmsError, DataColorSpace, InterpolationMethod, Layout,
TransformExecutor, TransformOptions,
};
use num_traits::AsPrimitive;
use std::arch::aarch64::*;
use std::marker::PhantomData;
use std::sync::Arc;
/// NEON f32 executor for a 4-channel (CMYK-style) → 3-channel LUT transform.
///
/// Type parameters:
/// - `T`: caller-side pixel storage type.
/// - `U`: reduced bin-index type produced by `LutBarycentricReduction`
///   (u8 for 256 bins, u16 for 65536 — see the factory below).
/// - `LAYOUT`: destination pixel layout discriminant (decoded via `Layout::from`).
/// - `GRID_SIZE`: LUT grid size per input dimension.
/// - `BIT_DEPTH`: significant bits of `T` when `T` is a finite integer type.
/// - `BINS` / `BARYCENTRIC_BINS`: size of the precomputed weight table and
///   the reduction target range (kept equal by the factory).
struct TransformLut4To3Neon<
    T,
    U,
    const LAYOUT: u8,
    const GRID_SIZE: usize,
    const BIT_DEPTH: usize,
    const BINS: usize,
    const BARYCENTRIC_BINS: usize,
> {
    /// LUT entries as 16-byte-aligned `[r, g, b, pad]` f32 quads.
    lut: Vec<NeonAlignedF32>,
    _phantom: PhantomData<T>,
    _phantom1: PhantomData<U>,
    /// Requested interpolation kernel; may be overridden per color space.
    interpolation_method: InterpolationMethod,
    /// Precomputed barycentric weights: lower/upper grid indices plus
    /// fractional weight for each of the `BINS` input bins.
    weights: Box<[BarycentricWeight<f32>; BINS]>,
    color_space: DataColorSpace,
    /// True when the profile curves are linear (affects kernel selection).
    is_linear: bool,
}
impl<
    T: Copy + AsPrimitive<f32> + Default + PointeeSizeExpressible,
    U: AsPrimitive<usize>,
    const LAYOUT: u8,
    const GRID_SIZE: usize,
    const BIT_DEPTH: usize,
    const BINS: usize,
    const BARYCENTRIC_BINS: usize,
> TransformLut4To3Neon<T, U, LAYOUT, GRID_SIZE, BIT_DEPTH, BINS, BARYCENTRIC_BINS>
where
    f32: AsPrimitive<T>,
    u32: AsPrimitive<T>,
    (): LutBarycentricReduction<T, U>,
{
    /// Transforms packed 4-channel `src` pixels into `dst` pixels of the
    /// configured layout.
    ///
    /// Strategy: the 4th channel (K) selects two bracketing 3D LUT planes;
    /// `interpolator` performs the (c, m, y) lookup in each plane, and the two
    /// results are blended here with the fractional K weight `t`.
    ///
    /// Callers must pre-validate lengths; any trailing elements that do not
    /// form a whole pixel are silently skipped by `chunks_exact`.
    #[allow(unused_unsafe)]
    // NOTE(review): presumably inline(never) limits monomorphization bloat
    // across the many (T, U, const) instantiations — confirm intent.
    #[inline(never)]
    fn transform_chunk(
        &self,
        src: &[T],
        dst: &mut [T],
        interpolator: Box<dyn NeonMdInterpolationDouble + Send + Sync>,
    ) {
        let cn = Layout::from(LAYOUT);
        let channels = cn.channels();
        let grid_size = GRID_SIZE as i32;
        // Entries per K plane: stride between consecutive K slices of the LUT.
        let grid_size3 = grid_size * grid_size * grid_size;
        let value_scale = unsafe { vdupq_n_f32(((1 << BIT_DEPTH) - 1) as f32) };
        let max_value = ((1 << BIT_DEPTH) - 1u32).as_();
        for (src, dst) in src.chunks_exact(4).zip(dst.chunks_exact_mut(channels)) {
            // Reduce each input channel to a bin index in [0, BARYCENTRIC_BINS).
            let c = <() as LutBarycentricReduction<T, U>>::reduce::<BIT_DEPTH, BARYCENTRIC_BINS>(
                src[0],
            );
            let m = <() as LutBarycentricReduction<T, U>>::reduce::<BIT_DEPTH, BARYCENTRIC_BINS>(
                src[1],
            );
            let y = <() as LutBarycentricReduction<T, U>>::reduce::<BIT_DEPTH, BARYCENTRIC_BINS>(
                src[2],
            );
            let k = <() as LutBarycentricReduction<T, U>>::reduce::<BIT_DEPTH, BARYCENTRIC_BINS>(
                src[3],
            );
            // K axis: lower plane index `w`, upper plane index `w_n`,
            // fractional weight `t` between them.
            let k_weights = self.weights[k.as_()];
            let w: i32 = k_weights.x;
            let w_n: i32 = k_weights.x_n;
            let t: f32 = k_weights.w;
            // LUT slices starting at the two bracketing K planes.
            let table1 = &self.lut[(w * grid_size3) as usize..];
            let table2 = &self.lut[(w_n * grid_size3) as usize..];
            // 3D interpolation of (c, m, y) inside each plane.
            let (a0, b0) = interpolator.inter3_neon(
                table1,
                table2,
                c.as_(),
                m.as_(),
                y.as_(),
                self.weights.as_slice(),
            );
            let (a0, b0) = (a0.v, b0.v);
            if T::FINITE {
                // Integer output: lerp, scale to [0, 2^BIT_DEPTH - 1], clamp
                // the top end, then convert (round-to-nearest, ties away).
                unsafe {
                    let t0 = vdupq_n_f32(t);
                    // hp = a0 - a0*t; v = hp + b0*t  ==>  a0*(1-t) + b0*t.
                    let hp = vfmsq_f32(a0, a0, t0);
                    let mut v = vfmaq_f32(hp, b0, t0);
                    v = vmulq_f32(v, value_scale);
                    v = vminq_f32(v, value_scale);
                    // Unsigned conversion saturates negatives to 0, so no
                    // explicit lower clamp is needed.
                    let jvx = vcvtaq_u32_f32(v);
                    dst[cn.r_i()] = vgetq_lane_u32::<0>(jvx).as_();
                    dst[cn.g_i()] = vgetq_lane_u32::<1>(jvx).as_();
                    dst[cn.b_i()] = vgetq_lane_u32::<2>(jvx).as_();
                }
            } else {
                // Float output: plain lerp, no scaling or clamping.
                unsafe {
                    let t0 = vdupq_n_f32(t);
                    let hp = vfmsq_f32(a0, a0, t0);
                    let v = vfmaq_f32(hp, b0, t0);
                    dst[cn.r_i()] = vgetq_lane_f32::<0>(v).as_();
                    dst[cn.g_i()] = vgetq_lane_f32::<1>(v).as_();
                    dst[cn.b_i()] = vgetq_lane_f32::<2>(v).as_();
                }
            }
            // 4-channel layouts get an opaque alpha.
            if channels == 4 {
                dst[cn.a_i()] = max_value;
            }
        }
    }
}
impl<
T: Copy + AsPrimitive<f32> + Default + PointeeSizeExpressible,
U: AsPrimitive<usize>,
const LAYOUT: u8,
const GRID_SIZE: usize,
const BIT_DEPTH: usize,
const BINS: usize,
const BARYCENTRIC_BINS: usize,
> TransformExecutor<T>
for TransformLut4To3Neon<T, U, LAYOUT, GRID_SIZE, BIT_DEPTH, BINS, BARYCENTRIC_BINS>
where
f32: AsPrimitive<T>,
u32: AsPrimitive<T>,
(): LutBarycentricReduction<T, U>,
{
fn transform(&self, src: &[T], dst: &mut [T]) -> Result<(), CmsError> {
let cn = Layout::from(LAYOUT);
let channels = cn.channels();
if src.len() % 4 != 0 {
return Err(CmsError::LaneMultipleOfChannels);
}
if dst.len() % channels != 0 {
return Err(CmsError::LaneMultipleOfChannels);
}
let src_chunks = src.len() / 4;
let dst_chunks = dst.len() / channels;
if src_chunks != dst_chunks {
return Err(CmsError::LaneSizeMismatch);
}
if self.color_space == DataColorSpace::Lab
|| (self.is_linear && self.color_space == DataColorSpace::Rgb)
|| self.color_space == DataColorSpace::Xyz
{
self.transform_chunk(src, dst, Box::new(TrilinearNeonDouble::<GRID_SIZE> {}));
} else {
match self.interpolation_method {
#[cfg(feature = "options")]
InterpolationMethod::Tetrahedral => {
self.transform_chunk(src, dst, Box::new(TetrahedralNeonDouble::<GRID_SIZE> {}));
}
#[cfg(feature = "options")]
InterpolationMethod::Pyramid => {
self.transform_chunk(src, dst, Box::new(PyramidalNeonDouble::<GRID_SIZE> {}));
}
#[cfg(feature = "options")]
InterpolationMethod::Prism => {
self.transform_chunk(src, dst, Box::new(PrismaticNeonDouble::<GRID_SIZE> {}));
}
InterpolationMethod::Linear => {
self.transform_chunk(src, dst, Box::new(TrilinearNeonDouble::<GRID_SIZE> {}));
}
}
}
Ok(())
}
}
pub(crate) struct NeonLut4x3Factory {}
impl Lut4x3Factory for NeonLut4x3Factory {
    /// Builds a 4-channel → 3-channel LUT transform executor backed by NEON.
    ///
    /// Two independent choices are made here:
    /// - Q0.15 fixed-point path vs. the default f32 path (fixed point is used
    ///   only when requested, the bit depth fits, and the CPU supports RDM);
    /// - low (256-bin, `u8` index) vs. high (65536-bin, `u16` index)
    ///   barycentric weight resolution.
    fn make_transform_4x3<
        T: Copy + AsPrimitive<f32> + Default + PointeeSizeExpressible + 'static + Send + Sync,
        const LAYOUT: u8,
        const GRID_SIZE: usize,
        const BIT_DEPTH: usize,
    >(
        lut: Vec<f32>,
        options: TransformOptions,
        color_space: DataColorSpace,
        is_linear: bool,
    ) -> Arc<dyn TransformExecutor<T> + Sync + Send>
    where
        f32: AsPrimitive<T>,
        u32: AsPrimitive<T>,
        (): LutBarycentricReduction<T, u8>,
        (): LutBarycentricReduction<T, u16>,
    {
        let use_fixed_point = options.prefer_fixed_point
            && BIT_DEPTH < 16
            && std::arch::is_aarch64_feature_detected!("rdm");
        if use_fixed_point {
            // Quantization scale: the full integer range for finite types,
            // otherwise 14 bits of precision.
            let fixed_bits = if T::FINITE { BIT_DEPTH as i32 } else { 14 };
            let scale = ((1i32 << fixed_bits) - 1) as f32;
            let quantize = |v: f32| (v * scale).round() as i16;
            // Repack flat RGB triplets as aligned i16 quads (zero padding lane).
            let lut = lut
                .chunks_exact(3)
                .map(|rgb| {
                    NeonAlignedI16x4([quantize(rgb[0]), quantize(rgb[1]), quantize(rgb[2]), 0])
                })
                .collect::<Vec<_>>();
            return match options.barycentric_weight_scale {
                BarycentricWeightScale::Low => {
                    let bins = BarycentricWeight::<i16>::create_ranged_256::<GRID_SIZE>();
                    assert_barycentric_lut_size_precondition::<i16, GRID_SIZE>(bins.as_slice());
                    Arc::new(TransformLut4To3NeonQ0_15::<
                        T,
                        u8,
                        LAYOUT,
                        GRID_SIZE,
                        BIT_DEPTH,
                        256,
                        256,
                    > {
                        lut,
                        _phantom: PhantomData,
                        _phantom1: PhantomData,
                        interpolation_method: options.interpolation_method,
                        weights: bins,
                        color_space,
                        is_linear,
                    })
                }
                #[cfg(feature = "options")]
                BarycentricWeightScale::High => {
                    let bins = BarycentricWeight::<i16>::create_binned::<GRID_SIZE, 65536>();
                    assert_barycentric_lut_size_precondition::<i16, GRID_SIZE>(bins.as_slice());
                    Arc::new(TransformLut4To3NeonQ0_15::<
                        T,
                        u16,
                        LAYOUT,
                        GRID_SIZE,
                        BIT_DEPTH,
                        65536,
                        65536,
                    > {
                        lut,
                        _phantom: PhantomData,
                        _phantom1: PhantomData,
                        interpolation_method: options.interpolation_method,
                        weights: bins,
                        color_space,
                        is_linear,
                    })
                }
            };
        }
        // f32 path: repack flat RGB triplets as aligned f32 quads.
        let lut = lut
            .chunks_exact(3)
            .map(|rgb| NeonAlignedF32([rgb[0], rgb[1], rgb[2], 0f32]))
            .collect::<Vec<_>>();
        match options.barycentric_weight_scale {
            BarycentricWeightScale::Low => {
                let bins = BarycentricWeight::<f32>::create_ranged_256::<GRID_SIZE>();
                assert_barycentric_lut_size_precondition::<f32, GRID_SIZE>(bins.as_slice());
                Arc::new(
                    TransformLut4To3Neon::<T, u8, LAYOUT, GRID_SIZE, BIT_DEPTH, 256, 256> {
                        lut,
                        _phantom: PhantomData,
                        _phantom1: PhantomData,
                        interpolation_method: options.interpolation_method,
                        weights: bins,
                        color_space,
                        is_linear,
                    },
                )
            }
            #[cfg(feature = "options")]
            BarycentricWeightScale::High => {
                let bins = BarycentricWeight::<f32>::create_binned::<GRID_SIZE, 65536>();
                assert_barycentric_lut_size_precondition::<f32, GRID_SIZE>(bins.as_slice());
                Arc::new(
                    TransformLut4To3Neon::<T, u16, LAYOUT, GRID_SIZE, BIT_DEPTH, 65536, 65536> {
                        lut,
                        _phantom: PhantomData,
                        _phantom1: PhantomData,
                        interpolation_method: options.interpolation_method,
                        weights: bins,
                        color_space,
                        is_linear,
                    },
                )
            }
        }
    }
}