use half::f16;
#[cfg(all(target_arch = "aarch64", target_feature = "neon"))]
use crate::cpu_features::is_aarch_f16c_supported;
#[cfg(all(target_arch = "aarch64", target_feature = "neon"))]
use crate::neon::stack_blur_pass_neon_f16;
#[cfg(any(target_arch = "x86_64", target_arch = "x86"))]
use crate::sse::stack_blur_pass_sse_f16;
use crate::stack_blur::StackBlurPass;
use crate::stack_blur_f32::stack_blur_pass_f;
use crate::unsafe_slice::UnsafeSlice;
use crate::{FastBlurChannels, ThreadingPolicy};
/// Executes one horizontal stack-blur pass over the image rows assigned to
/// `thread` (out of `thread_count` workers).
///
/// The pass implementation is chosen at runtime: a scalar pass accumulating
/// in `f32` is the default; for 3- and 4-channel layouts a NEON (aarch64) or
/// SSE4.1+F16C (x86/x86_64) specialization is substituted when the CPU
/// supports it.
fn stack_blur_worker_horizontal(
    slice: &UnsafeSlice<f16>,
    stride: u32,
    width: u32,
    height: u32,
    radius: u32,
    channels: FastBlurChannels,
    thread: usize,
    thread_count: usize,
) {
    // Common signature shared by the scalar and SIMD pass implementations.
    type PassFn = fn(&UnsafeSlice<f16>, u32, u32, u32, u32, StackBlurPass, usize, usize);
    #[cfg(any(target_arch = "x86_64", target_arch = "x86"))]
    let _has_sse = std::arch::is_x86_feature_detected!("sse4.1");
    #[cfg(any(target_arch = "x86_64", target_arch = "x86"))]
    let _has_f16c = std::arch::is_x86_feature_detected!("f16c");
    // Scalar fallback, monomorphized per channel count.
    let mut _dispatcher: PassFn = match channels {
        FastBlurChannels::Plane => stack_blur_pass_f::<f16, f32, 1>,
        FastBlurChannels::Channels3 => stack_blur_pass_f::<f16, f32, 3>,
        FastBlurChannels::Channels4 => stack_blur_pass_f::<f16, f32, 4>,
    };
    // SIMD specializations exist only for the 3- and 4-channel layouts.
    match channels {
        FastBlurChannels::Plane => {}
        FastBlurChannels::Channels3 => {
            #[cfg(all(target_arch = "aarch64", target_feature = "neon"))]
            if is_aarch_f16c_supported() {
                _dispatcher = stack_blur_pass_neon_f16::<3>;
            }
            #[cfg(any(target_arch = "x86_64", target_arch = "x86"))]
            if _has_sse && _has_f16c {
                _dispatcher = stack_blur_pass_sse_f16::<3>;
            }
        }
        FastBlurChannels::Channels4 => {
            #[cfg(all(target_arch = "aarch64", target_feature = "neon"))]
            if is_aarch_f16c_supported() {
                _dispatcher = stack_blur_pass_neon_f16::<4>;
            }
            #[cfg(any(target_arch = "x86_64", target_arch = "x86"))]
            if _has_sse && _has_f16c {
                _dispatcher = stack_blur_pass_sse_f16::<4>;
            }
        }
    }
    _dispatcher(
        slice,
        stride,
        width,
        height,
        radius,
        StackBlurPass::Horizontal,
        thread,
        thread_count,
    );
}
/// Executes one vertical stack-blur pass over the image columns assigned to
/// `thread` (out of `thread_count` workers).
///
/// Mirrors the horizontal worker: a scalar `f32`-accumulating pass is the
/// default, replaced for 3- and 4-channel layouts by a NEON (aarch64) or
/// SSE4.1+F16C (x86/x86_64) specialization when the CPU supports it.
fn stack_blur_worker_vertical(
    slice: &UnsafeSlice<f16>,
    stride: u32,
    width: u32,
    height: u32,
    radius: u32,
    channels: FastBlurChannels,
    thread: usize,
    thread_count: usize,
) {
    // Common signature shared by the scalar and SIMD pass implementations.
    type PassFn = fn(&UnsafeSlice<f16>, u32, u32, u32, u32, StackBlurPass, usize, usize);
    #[cfg(any(target_arch = "x86_64", target_arch = "x86"))]
    let _has_sse = std::arch::is_x86_feature_detected!("sse4.1");
    #[cfg(any(target_arch = "x86_64", target_arch = "x86"))]
    let _has_f16c = std::arch::is_x86_feature_detected!("f16c");
    // Scalar fallback, monomorphized per channel count.
    let mut _dispatcher: PassFn = match channels {
        FastBlurChannels::Plane => stack_blur_pass_f::<f16, f32, 1>,
        FastBlurChannels::Channels3 => stack_blur_pass_f::<f16, f32, 3>,
        FastBlurChannels::Channels4 => stack_blur_pass_f::<f16, f32, 4>,
    };
    // SIMD specializations exist only for the 3- and 4-channel layouts.
    match channels {
        FastBlurChannels::Plane => {}
        FastBlurChannels::Channels3 => {
            #[cfg(all(target_arch = "aarch64", target_feature = "neon"))]
            if is_aarch_f16c_supported() {
                _dispatcher = stack_blur_pass_neon_f16::<3>;
            }
            #[cfg(any(target_arch = "x86_64", target_arch = "x86"))]
            if _has_sse && _has_f16c {
                _dispatcher = stack_blur_pass_sse_f16::<3>;
            }
        }
        FastBlurChannels::Channels4 => {
            #[cfg(all(target_arch = "aarch64", target_feature = "neon"))]
            if is_aarch_f16c_supported() {
                _dispatcher = stack_blur_pass_neon_f16::<4>;
            }
            #[cfg(any(target_arch = "x86_64", target_arch = "x86"))]
            if _has_sse && _has_f16c {
                _dispatcher = stack_blur_pass_sse_f16::<4>;
            }
        }
    }
    _dispatcher(
        slice,
        stride,
        width,
        height,
        radius,
        StackBlurPass::Vertical,
        thread,
        thread_count,
    );
}
/// Applies a stack blur to an `f16` pixel buffer in place.
///
/// The blur is separable: a horizontal pass over every row is completed
/// first, then a vertical pass over every column. The effective radius is
/// clamped to a minimum of 2. Depending on `threading_policy` the work is
/// either done on the calling thread or split across a rayon pool, with each
/// worker handling a disjoint stripe of rows/columns (coordinated through
/// `UnsafeSlice`).
pub fn stack_blur_f16(
    in_place: &mut [f16],
    width: u32,
    height: u32,
    radius: u32,
    channels: FastBlurChannels,
    threading_policy: ThreadingPolicy,
) {
    let stride = width * channels.get_channels() as u32;
    // The stack-blur kernel degenerates below radius 2.
    let radius = radius.max(2);
    let threads = threading_policy.get_threads_count(width, height) as u32;

    if threads == 1 {
        // Single-threaded fast path: no pool, both passes run inline.
        let slice = UnsafeSlice::new(in_place);
        stack_blur_worker_horizontal(&slice, stride, width, height, radius, channels, 0, 1);
        stack_blur_worker_vertical(&slice, stride, width, height, radius, channels, 0, 1);
        return;
    }

    let pool = rayon::ThreadPoolBuilder::new()
        .num_threads(threads as usize)
        .build()
        .unwrap();

    // Horizontal pass must fully finish before the vertical pass starts,
    // hence two separate scopes.
    pool.scope(|scope| {
        let slice = UnsafeSlice::new(in_place);
        for worker in 0..threads {
            scope.spawn(move |_| {
                stack_blur_worker_horizontal(
                    &slice,
                    stride,
                    width,
                    height,
                    radius,
                    channels,
                    worker as usize,
                    threads as usize,
                );
            });
        }
    });

    pool.scope(|scope| {
        let slice = UnsafeSlice::new(in_place);
        for worker in 0..threads {
            scope.spawn(move |_| {
                stack_blur_worker_vertical(
                    &slice,
                    stride,
                    width,
                    height,
                    radius,
                    channels,
                    worker as usize,
                    threads as usize,
                );
            });
        }
    })
}