#[allow(unused_imports)] use archmage::{
NeonToken, ScalarToken, Wasm128Token, X64V3Token, X64V4Token, arcane, incant, rite,
};
/// Decode 16 sRGB-encoded values to linear light using a rational
/// polynomial approximation (S2L_P / S2L_Q). Input lanes are clamped to
/// [0, 1]; any input lane >= 1.0 produces exactly 1.0.
#[allow(dead_code)]
#[archmage::magetypes(define(f32x16), v4(cfg(avx512)), v3, neon, wasm128)]
fn mt_srgb_to_linear(token: Token, srgb: [f32; 16]) -> [f32; 16] {
use crate::rational_poly::{S2L_P, S2L_Q, SRGB_THRESHOLD};
let srgb = f32x16::from_array(token, srgb);
let zero = f32x16::zero(token);
let one = f32x16::splat(token, 1.0);
// Clamp once; both the linear-segment and polynomial paths use this value.
let clamped = srgb.max(zero).min(one);
// Linear segment below the sRGB threshold: x / 12.92.
let linear_result = clamped * f32x16::splat(token, 1.0 / 12.92);
let x = clamped;
// Numerator polynomial via Horner's rule with fused multiply-adds.
let yp = f32x16::splat(token, S2L_P[4]).mul_add(x, f32x16::splat(token, S2L_P[3]));
let yp = yp.mul_add(x, f32x16::splat(token, S2L_P[2]));
let yp = yp.mul_add(x, f32x16::splat(token, S2L_P[1]));
let yp = yp.mul_add(x, f32x16::splat(token, S2L_P[0]));
// Denominator polynomial, same scheme.
let yq = f32x16::splat(token, S2L_Q[4]).mul_add(x, f32x16::splat(token, S2L_Q[3]));
let yq = yq.mul_add(x, f32x16::splat(token, S2L_Q[2]));
let yq = yq.mul_add(x, f32x16::splat(token, S2L_Q[1]));
let yq = yq.mul_add(x, f32x16::splat(token, S2L_Q[0]));
let power_result = (yp / yq).min(one);
// NOTE(review): this uses simd_lt while srgb_u16_to_linear_slice_tier uses
// simd_le against the same SRGB_THRESHOLD; the two differ only exactly at
// the threshold — confirm which comparison is intended.
let mask = clamped.simd_lt(f32x16::splat(token, SRGB_THRESHOLD));
let result = f32x16::blend(mask, linear_result, power_result);
// Force exact 1.0 for inputs at or above 1.0.
let ge_one = srgb.simd_ge(one);
f32x16::blend(ge_one, one, result).to_array()
}
#[allow(dead_code)]
#[archmage::magetypes(define(f32x16), v4(cfg(avx512)), v3, neon, wasm128)]
/// Encode 16 linear-light values as sRGB via a rational polynomial
/// evaluated in sqrt-space. Inputs are clamped to [0, 1]; any input lane
/// >= 1.0 encodes to exactly 1.0.
fn mt_linear_to_srgb(token: Token, linear: [f32; 16]) -> [f32; 16] {
    use crate::rational_poly::{L2S_P, L2S_Q, LINEAR_THRESHOLD};
    let lin = f32x16::from_array(token, linear);
    let one = f32x16::splat(token, 1.0);
    let clamped = lin.max(f32x16::zero(token)).min(one);
    // Below the threshold the transfer function is a straight line.
    let below = clamped * f32x16::splat(token, 12.92);
    // The rational approximation is evaluated on sqrt(x).
    let x = clamped.sqrt();
    // Horner evaluation with FMAs, highest coefficient seeded first.
    let mut num = f32x16::splat(token, L2S_P[4]);
    for i in (0..4).rev() {
        num = num.mul_add(x, f32x16::splat(token, L2S_P[i]));
    }
    let mut den = f32x16::splat(token, L2S_Q[4]);
    for i in (0..4).rev() {
        den = den.mul_add(x, f32x16::splat(token, L2S_Q[i]));
    }
    let above = (num / den).min(one);
    let use_linear = clamped.simd_lt(f32x16::splat(token, LINEAR_THRESHOLD));
    let encoded = f32x16::blend(use_linear, below, above);
    // Inputs at or above 1.0 must encode to exactly 1.0.
    let ge_one = lin.simd_ge(one);
    f32x16::blend(ge_one, one, encoded).to_array()
}
#[allow(dead_code)]
#[archmage::magetypes(define(f32x16), v4(cfg(avx512)), v3, neon, wasm128)]
/// Decode 16 gamma-encoded values to linear by raising the [0, 1]-clamped
/// input to `gamma` (mid-precision pow).
fn mt_gamma_to_linear(token: Token, encoded: [f32; 16], gamma: f32) -> [f32; 16] {
    let v = f32x16::from_array(token, encoded);
    let lo = f32x16::zero(token);
    let hi = f32x16::splat(token, 1.0);
    v.max(lo).min(hi).pow_midp(gamma).to_array()
}
#[allow(dead_code)]
#[archmage::magetypes(define(f32x16), v4(cfg(avx512)), v3, neon, wasm128)]
/// Encode 16 linear values with a simple gamma curve by raising the
/// [0, 1]-clamped input to `1/gamma` (mid-precision pow).
fn mt_linear_to_gamma(token: Token, linear: [f32; 16], gamma: f32) -> [f32; 16] {
    let v = f32x16::from_array(token, linear);
    let lo = f32x16::zero(token);
    let hi = f32x16::splat(token, 1.0);
    v.max(lo).min(hi).pow_midp(1.0 / gamma).to_array()
}
#[archmage::magetypes(define(f32x16), v4(cfg(avx512)), v3, neon, wasm128, scalar)]
/// sRGB → linear over a flat f32 slice, in place: full 16-wide chunks go
/// through the SIMD kernel, the tail through the scalar conversion.
fn srgb_to_linear_slice_tier(_token: Token, values: &mut [f32]) {
    let (body, tail) = values.as_chunks_mut::<16>();
    for block in body.iter_mut() {
        *block = incant!(mt_srgb_to_linear(*block));
    }
    for value in tail.iter_mut() {
        *value = crate::scalar::srgb_to_linear(*value);
    }
}
/// sRGB → linear over interleaved RGBA f32 pixels, in place. Alpha
/// channels are preserved (not gamma-converted).
#[archmage::magetypes(define(f32x16), v4(cfg(avx512)), v3, neon, wasm128, scalar)]
fn srgb_to_linear_rgba_slice_tier(_token: Token, values: &mut [f32]) {
let (chunks, remainder) = values.as_chunks_mut::<16>();
for chunk in chunks {
// Save the 4 alpha lanes; the x16 kernel converts all 16 lanes.
let a = [chunk[3], chunk[7], chunk[11], chunk[15]];
*chunk = incant!(mt_srgb_to_linear(*chunk));
// Restore the untouched alpha values.
[chunk[3], chunk[7], chunk[11], chunk[15]] = a;
}
// Tail: convert RGB per pixel, leave alpha (pixel[3]) as-is.
for pixel in remainder.chunks_exact_mut(4) {
pixel[0] = crate::scalar::srgb_to_linear(pixel[0]);
pixel[1] = crate::scalar::srgb_to_linear(pixel[1]);
pixel[2] = crate::scalar::srgb_to_linear(pixel[2]);
}
}
/// Convert a flat f32 slice from sRGB to linear in place, dispatching
/// across the listed SIMD tiers with a scalar fallback.
#[inline]
pub fn srgb_to_linear_slice(values: &mut [f32]) {
incant!(
srgb_to_linear_slice_tier(values),
[v4, v3, neon, wasm128, scalar]
)
}
/// Convert interleaved RGBA f32 pixels from sRGB to linear in place
/// (alpha preserved), dispatching across the listed SIMD tiers.
#[inline]
pub fn srgb_to_linear_rgba_slice(values: &mut [f32]) {
incant!(
srgb_to_linear_rgba_slice_tier(values),
[v4, v3, neon, wasm128, scalar]
)
}
#[archmage::magetypes(define(f32x16), v4(cfg(avx512)), v3, neon, wasm128, scalar)]
/// Linear → sRGB over a flat f32 slice, in place: full 16-wide chunks go
/// through the SIMD kernel, the tail through the scalar conversion.
fn linear_to_srgb_slice_tier(_token: Token, values: &mut [f32]) {
    let (body, tail) = values.as_chunks_mut::<16>();
    for block in body.iter_mut() {
        *block = incant!(mt_linear_to_srgb(*block));
    }
    for value in tail.iter_mut() {
        *value = crate::scalar::linear_to_srgb(*value);
    }
}
/// Linear → sRGB over interleaved RGBA f32 pixels, in place. Alpha
/// channels are preserved (not gamma-converted).
#[archmage::magetypes(define(f32x16), v4(cfg(avx512)), v3, neon, wasm128, scalar)]
fn linear_to_srgb_rgba_slice_tier(_token: Token, values: &mut [f32]) {
let (chunks, remainder) = values.as_chunks_mut::<16>();
for chunk in chunks {
// Save the 4 alpha lanes; the x16 kernel converts all 16 lanes.
let a = [chunk[3], chunk[7], chunk[11], chunk[15]];
*chunk = incant!(mt_linear_to_srgb(*chunk));
// Restore the untouched alpha values.
[chunk[3], chunk[7], chunk[11], chunk[15]] = a;
}
// Tail: convert RGB per pixel, leave alpha (pixel[3]) as-is.
for pixel in remainder.chunks_exact_mut(4) {
pixel[0] = crate::scalar::linear_to_srgb(pixel[0]);
pixel[1] = crate::scalar::linear_to_srgb(pixel[1]);
pixel[2] = crate::scalar::linear_to_srgb(pixel[2]);
}
}
/// Convert a flat f32 slice from linear to sRGB in place, dispatching
/// across the listed SIMD tiers with a scalar fallback.
#[inline]
pub fn linear_to_srgb_slice(values: &mut [f32]) {
incant!(
linear_to_srgb_slice_tier(values),
[v4, v3, neon, wasm128, scalar]
)
}
/// Convert interleaved RGBA f32 pixels from linear to sRGB in place
/// (alpha preserved), dispatching across the listed SIMD tiers.
#[inline]
pub fn linear_to_srgb_rgba_slice(values: &mut [f32]) {
incant!(
linear_to_srgb_rgba_slice_tier(values),
[v4, v3, neon, wasm128, scalar]
)
}
#[archmage::magetypes(define(f32x16), v4(cfg(avx512)), v3, neon, wasm128, scalar)]
/// Extended-range sRGB → linear over a slice, in place: full 16-wide
/// chunks use the x16 extended kernel, the tail the scalar variant.
fn srgb_to_linear_extended_slice_tier(token: Token, values: &mut [f32]) {
    let (body, tail) = values.as_chunks_mut::<16>();
    for block in body.iter_mut() {
        let v = f32x16::from_array(token, *block);
        *block = crate::tf::srgb::srgb_to_linear_extended_x16(token, v).to_array();
    }
    for value in tail.iter_mut() {
        *value = crate::scalar::srgb_to_linear_extended(*value);
    }
}
/// Extended-range sRGB → linear over a flat f32 slice, in place,
/// dispatching across the listed SIMD tiers.
#[inline]
pub fn srgb_to_linear_extended_slice(values: &mut [f32]) {
incant!(
srgb_to_linear_extended_slice_tier(values),
[v4, v3, neon, wasm128, scalar]
)
}
#[archmage::magetypes(define(f32x16), v4(cfg(avx512)), v3, neon, wasm128, scalar)]
/// Extended-range linear → sRGB over a slice, in place: full 16-wide
/// chunks use the x16 extended kernel, the tail the scalar variant.
fn linear_to_srgb_extended_slice_tier(token: Token, values: &mut [f32]) {
    let (body, tail) = values.as_chunks_mut::<16>();
    for block in body.iter_mut() {
        let v = f32x16::from_array(token, *block);
        *block = crate::tf::srgb::linear_to_srgb_extended_x16(token, v).to_array();
    }
    for value in tail.iter_mut() {
        *value = crate::scalar::linear_to_srgb_extended(*value);
    }
}
/// Extended-range linear → sRGB over a flat f32 slice, in place,
/// dispatching across the listed SIMD tiers.
#[inline]
pub fn linear_to_srgb_extended_slice(values: &mut [f32]) {
incant!(
linear_to_srgb_extended_slice_tier(values),
[v4, v3, neon, wasm128, scalar]
)
}
/// sRGB → linear over interleaved RGBA pixels, then premultiply RGB by
/// alpha, in place. Alpha channels themselves are left unchanged.
#[archmage::magetypes(define(f32x16), v4(cfg(avx512)), v3, neon, wasm128, scalar)]
fn srgb_to_linear_premultiply_rgba_slice_tier(token: Token, values: &mut [f32]) {
let (chunks, remainder) = values.as_chunks_mut::<16>();
for chunk in chunks {
// Save the 4 alpha lanes before the x16 kernel touches them.
let a = [chunk[3], chunk[7], chunk[11], chunk[15]];
let converted_arr = incant!(mt_srgb_to_linear(*chunk));
let converted = f32x16::from_array(token, converted_arr);
// RGBA x4 lane layout: multiply RGB lanes by alpha, alpha lanes by 1.0.
let alpha = f32x16::from_array(
token,
[
a[0], a[0], a[0], 1.0, a[1], a[1], a[1], 1.0, a[2], a[2], a[2], 1.0, a[3], a[3],
a[3], 1.0,
],
);
*chunk = (converted * alpha).to_array();
// Restore the original (unconverted) alpha values.
[chunk[3], chunk[7], chunk[11], chunk[15]] = a;
}
// Tail: per-pixel scalar convert + premultiply; alpha stays as-is.
for pixel in remainder.chunks_exact_mut(4) {
let a = pixel[3];
pixel[0] = crate::scalar::srgb_to_linear(pixel[0]) * a;
pixel[1] = crate::scalar::srgb_to_linear(pixel[1]) * a;
pixel[2] = crate::scalar::srgb_to_linear(pixel[2]) * a;
}
}
/// sRGB → linear with alpha premultiplication over interleaved RGBA f32
/// pixels, in place, dispatching across the listed SIMD tiers.
#[inline]
pub fn srgb_to_linear_premultiply_rgba_slice(values: &mut [f32]) {
incant!(
srgb_to_linear_premultiply_rgba_slice_tier(values),
[v4, v3, neon, wasm128, scalar]
)
}
#[archmage::magetypes(define(f32x16), v4(cfg(avx512)), v3, neon, wasm128, scalar)]
/// Un-premultiply interleaved RGBA pixels (divide RGB by alpha), then
/// encode RGB from linear to sRGB, in place. Alpha channels pass through
/// unchanged; pixels whose alpha is at or below
/// `crate::UNPREMUL_ALPHA_THRESHOLD` get their RGB forced to 0 instead of
/// being scaled by a huge 1/alpha.
fn unpremultiply_linear_to_srgb_rgba_slice_tier(token: Token, values: &mut [f32]) {
    // Reciprocal alpha, or 0.0 for (near-)transparent pixels. Local
    // closure replaces the previous 20-line hand-expanded table.
    let inv_or_zero = |a: f32| {
        if a > crate::UNPREMUL_ALPHA_THRESHOLD {
            1.0 / a
        } else {
            0.0
        }
    };
    let (chunks, remainder) = values.as_chunks_mut::<16>();
    for chunk in chunks {
        // Save the 4 alpha lanes before the x16 kernel touches them.
        let a = [chunk[3], chunk[7], chunk[11], chunk[15]];
        let inv = a.map(inv_or_zero);
        let v = f32x16::from_array(token, *chunk);
        // RGBA x4 lane layout: RGB lanes scale by 1/alpha, alpha lanes by 1.0.
        let inv_alpha = f32x16::from_array(
            token,
            [
                inv[0], inv[0], inv[0], 1.0, inv[1], inv[1], inv[1], 1.0, inv[2], inv[2],
                inv[2], 1.0, inv[3], inv[3], inv[3], 1.0,
            ],
        );
        let unpremul = (v * inv_alpha).to_array();
        *chunk = incant!(mt_linear_to_srgb(unpremul));
        // Restore the original alpha values.
        [chunk[3], chunk[7], chunk[11], chunk[15]] = a;
    }
    // Tail: scalar per-pixel un-premultiply + encode; alpha stays as-is.
    for pixel in remainder.chunks_exact_mut(4) {
        let a = pixel[3];
        if a > crate::UNPREMUL_ALPHA_THRESHOLD {
            let inv_a = 1.0 / a;
            pixel[0] = crate::scalar::linear_to_srgb(pixel[0] * inv_a);
            pixel[1] = crate::scalar::linear_to_srgb(pixel[1] * inv_a);
            pixel[2] = crate::scalar::linear_to_srgb(pixel[2] * inv_a);
        } else {
            pixel[0] = 0.0;
            pixel[1] = 0.0;
            pixel[2] = 0.0;
        }
    }
}
/// Un-premultiply and encode interleaved RGBA f32 pixels from linear to
/// sRGB, in place, dispatching across the listed SIMD tiers.
#[inline]
pub fn unpremultiply_linear_to_srgb_rgba_slice(values: &mut [f32]) {
incant!(
unpremultiply_linear_to_srgb_rgba_slice_tier(values),
[v4, v3, neon, wasm128, scalar]
)
}
/// Decode sRGB u8 values to linear f32 (scalar/LUT path; u8 decode is a
/// table lookup, so no SIMD tier is needed). Slices must be equal length.
#[inline]
pub fn srgb_u8_to_linear_slice(input: &[u8], output: &mut [f32]) {
assert_eq!(input.len(), output.len());
let (in_chunks, in_remainder) = input.as_chunks::<8>();
let (out_chunks, out_remainder) = output.as_chunks_mut::<8>();
// 8 values at a time through the unrolled scalar helper.
for (inp, out) in in_chunks.iter().zip(out_chunks.iter_mut()) {
*out = crate::scalar::srgb_u8_to_linear_x8(*inp);
}
for (inp, out) in in_remainder.iter().zip(out_remainder.iter_mut()) {
*out = crate::scalar::srgb_u8_to_linear(*inp);
}
}
#[archmage::magetypes(define(f32x16), v4(cfg(avx512)), v3, neon, wasm128, scalar)]
/// Encode linear f32 values to sRGB u8 via a 4096-entry LUT: SIMD computes
/// rounded LUT indices for 16 values at a time, then scalar gathers.
fn linear_to_srgb_u8_slice_tier(token: Token, input: &[f32], output: &mut [u8]) {
    let lut = crate::const_luts::linear_to_srgb_u8();
    let (in_chunks, in_rem) = input.as_chunks::<16>();
    let (out_chunks, out_rem) = output.as_chunks_mut::<16>();
    for (inp, out) in in_chunks.iter().zip(out_chunks.iter_mut()) {
        let v = f32x16::from_array(token, *inp);
        let clamped = v.max(f32x16::zero(token)).min(f32x16::splat(token, 1.0));
        // Round-to-nearest LUT index in [0, 4095].
        let idx_f = clamped.mul_add(f32x16::splat(token, 4095.0), f32x16::splat(token, 0.5));
        let idx_i = idx_f.to_i32().to_array();
        // Iterator zip instead of an indexed loop: no per-element bounds
        // checks, same order and results.
        for (o, &idx) in out.iter_mut().zip(idx_i.iter()) {
            // Mask keeps the index in-range even for unexpected values.
            *o = lut[(idx as usize) & 0xFFF];
        }
    }
    for (inp, out) in in_rem.iter().zip(out_rem.iter_mut()) {
        let clamped = inp.clamp(0.0, 1.0);
        let idx = (clamped * 4095.0 + 0.5) as usize & 0xFFF;
        *out = lut[idx];
    }
}
/// Encode linear f32 values to sRGB u8, dispatching across the listed
/// SIMD tiers. Panics if the slices differ in length.
#[inline]
pub fn linear_to_srgb_u8_slice(input: &[f32], output: &mut [u8]) {
assert_eq!(input.len(), output.len());
incant!(
linear_to_srgb_u8_slice_tier(input, output),
[v4, v3, neon, wasm128, scalar]
)
}
/// Decode sRGB u16 values to linear f32: normalize to [0, 1], then apply
/// the rational-polynomial sRGB decode 16 lanes at a time.
#[archmage::magetypes(define(f32x16), v4(cfg(avx512)), v3, neon, wasm128, scalar)]
fn srgb_u16_to_linear_slice_tier(token: Token, input: &[u16], output: &mut [f32]) {
use crate::rational_poly::{LINEAR_SCALE, S2L_P, S2L_Q, SRGB_THRESHOLD};
let (in_chunks, in_rem) = input.as_chunks::<16>();
let (out_chunks, out_rem) = output.as_chunks_mut::<16>();
for (inp, out) in in_chunks.iter().zip(out_chunks.iter_mut()) {
// Widen u16 -> f32, then normalize by 1/65535 into [0, 1].
let mut f = [0.0f32; 16];
for i in 0..16 {
f[i] = inp[i] as f32;
}
let v = f32x16::from_array(token, f) * f32x16::splat(token, 1.0 / 65535.0);
let one = f32x16::splat(token, 1.0);
// Linear segment below the sRGB threshold.
let linear_result = v * f32x16::splat(token, LINEAR_SCALE);
// Numerator / denominator polynomials, Horner form with FMAs.
let yp = f32x16::splat(token, S2L_P[4]).mul_add(v, f32x16::splat(token, S2L_P[3]));
let yp = yp.mul_add(v, f32x16::splat(token, S2L_P[2]));
let yp = yp.mul_add(v, f32x16::splat(token, S2L_P[1]));
let yp = yp.mul_add(v, f32x16::splat(token, S2L_P[0]));
let yq = f32x16::splat(token, S2L_Q[4]).mul_add(v, f32x16::splat(token, S2L_Q[3]));
let yq = yq.mul_add(v, f32x16::splat(token, S2L_Q[2]));
let yq = yq.mul_add(v, f32x16::splat(token, S2L_Q[1]));
let yq = yq.mul_add(v, f32x16::splat(token, S2L_Q[0]));
let power_result = (yp / yq).min(one);
// NOTE(review): simd_le here vs simd_lt in mt_srgb_to_linear for the
// same threshold; they differ only exactly at SRGB_THRESHOLD.
let mask = v.simd_le(f32x16::splat(token, SRGB_THRESHOLD));
let result = f32x16::blend(mask, linear_result, power_result);
// Force exact 1.0 for inputs at or above 1.0.
let ge_one = v.simd_ge(one);
let result = f32x16::blend(ge_one, one, result);
*out = result.to_array();
}
for (inp, out) in in_rem.iter().zip(out_rem.iter_mut()) {
*out = crate::scalar::srgb_u16_to_linear(*inp);
}
}
/// Decode sRGB u16 values to linear f32, dispatching across the listed
/// SIMD tiers. Panics if the slices differ in length.
#[inline]
pub fn srgb_u16_to_linear_slice(input: &[u16], output: &mut [f32]) {
assert_eq!(input.len(), output.len());
incant!(
srgb_u16_to_linear_slice_tier(input, output),
[v4, v3, neon, wasm128, scalar]
)
}
#[archmage::magetypes(define(f32x16), v4(cfg(avx512)), v3, neon, wasm128, scalar)]
/// Encode linear f32 values to sRGB u16 (full 0..=65535 range) using the
/// rational-polynomial sRGB encoder; the tail uses the scalar encoder.
fn linear_to_srgb_u16_slice_tier(token: Token, input: &[f32], output: &mut [u16]) {
    use crate::rational_poly::{L2S_P, L2S_Q, LINEAR_THRESHOLD};
    let (in_chunks, in_rem) = input.as_chunks::<16>();
    let (out_chunks, out_rem) = output.as_chunks_mut::<16>();
    for (inp, out) in in_chunks.iter().zip(out_chunks.iter_mut()) {
        let linear = f32x16::from_array(token, *inp);
        let zero = f32x16::zero(token);
        let one = f32x16::splat(token, 1.0);
        let clamped = linear.max(zero).min(one);
        // Linear segment below the threshold: x * 12.92.
        let linear_result = clamped * f32x16::splat(token, 12.92);
        // Rational approximation is evaluated in sqrt-space (Horner, FMA).
        let x = clamped.sqrt();
        let yp = f32x16::splat(token, L2S_P[4]).mul_add(x, f32x16::splat(token, L2S_P[3]));
        let yp = yp.mul_add(x, f32x16::splat(token, L2S_P[2]));
        let yp = yp.mul_add(x, f32x16::splat(token, L2S_P[1]));
        let yp = yp.mul_add(x, f32x16::splat(token, L2S_P[0]));
        let yq = f32x16::splat(token, L2S_Q[4]).mul_add(x, f32x16::splat(token, L2S_Q[3]));
        let yq = yq.mul_add(x, f32x16::splat(token, L2S_Q[2]));
        let yq = yq.mul_add(x, f32x16::splat(token, L2S_Q[1]));
        let yq = yq.mul_add(x, f32x16::splat(token, L2S_Q[0]));
        let power_result = (yp / yq).min(one);
        let thresh_mask = clamped.simd_lt(f32x16::splat(token, LINEAR_THRESHOLD));
        let srgb = f32x16::blend(thresh_mask, linear_result, power_result);
        // Inputs at or above 1.0 must encode to exactly 1.0 (65535).
        let ge_one = linear.simd_ge(one);
        let srgb = f32x16::blend(ge_one, one, srgb);
        // Round-to-nearest: scale and add 0.5 before the int conversion.
        let scaled = srgb.mul_add(f32x16::splat(token, 65535.0), f32x16::splat(token, 0.5));
        let idx_i = scaled.to_i32().to_array();
        // Iterator zip instead of an indexed loop: no per-element bounds
        // checks, same order and results.
        for (o, &s) in out.iter_mut().zip(idx_i.iter()) {
            *o = s as u16;
        }
    }
    for (inp, out) in in_rem.iter().zip(out_rem.iter_mut()) {
        *out = crate::scalar::linear_to_srgb_u16(*inp);
    }
}
/// Encode linear f32 values to sRGB u16, dispatching across the listed
/// SIMD tiers. Panics if the slices differ in length.
#[inline]
pub fn linear_to_srgb_u16_slice(input: &[f32], output: &mut [u16]) {
assert_eq!(input.len(), output.len());
incant!(
linear_to_srgb_u16_slice_tier(input, output),
[v4, v3, neon, wasm128, scalar]
)
}
/// Fast linear → sRGB u16 via a sqrt-indexed encode LUT (std-only: the
/// LUT is built at runtime). Trades exactness for a table lookup.
#[cfg(feature = "std")]
#[archmage::magetypes(define(f32x16), v4(cfg(avx512)), v3, neon, wasm128, scalar)]
fn linear_to_srgb_u16_slice_fast_tier(token: Token, input: &[f32], output: &mut [u16]) {
let lut = crate::u16_lut::encode_lut();
let max_idx = crate::u16_lut::ENCODE_LUT_N - 1;
let scale = crate::u16_lut::ENCODE_SQRT_SCALE;
let (in_chunks, in_rem) = input.as_chunks::<16>();
let (out_chunks, out_rem) = output.as_chunks_mut::<16>();
for (inp, out) in in_chunks.iter().zip(out_chunks.iter_mut()) {
let v = f32x16::from_array(token, *inp);
let clamped = v.max(f32x16::zero(token)).min(f32x16::splat(token, 1.0));
// Index the LUT by sqrt(x) (denser sampling near 0), round-to-nearest.
let idx_f = clamped
.sqrt()
.mul_add(f32x16::splat(token, scale), f32x16::splat(token, 0.5));
let idx_i = idx_f.to_i32().to_array();
for i in 0..16 {
// min() keeps the rounded index inside the table.
out[i] = lut[(idx_i[i] as usize).min(max_idx)];
}
}
for (inp, out) in in_rem.iter().zip(out_rem.iter_mut()) {
*out = crate::scalar::linear_to_srgb_u16_fast(*inp);
}
}
/// Fast linear → sRGB u16 encode. With `std`, dispatches to the LUT-based
/// SIMD tiers; without `std`, falls back to the scalar fast path.
#[inline]
pub fn linear_to_srgb_u16_slice_fast(input: &[f32], output: &mut [u16]) {
assert_eq!(input.len(), output.len());
#[cfg(feature = "std")]
{
incant!(
linear_to_srgb_u16_slice_fast_tier(input, output),
[v4, v3, neon, wasm128, scalar]
)
}
#[cfg(not(feature = "std"))]
{
for (inp, out) in input.iter().zip(output.iter_mut()) {
*out = crate::scalar::linear_to_srgb_u16_fast(*inp);
}
}
}
/// Decode interleaved RGBA u8 pixels: RGB through the sRGB u8 decoder,
/// alpha linearly scaled to [0, 1]. Slices must be equal length.
pub fn srgb_u8_to_linear_rgba_slice(input: &[u8], output: &mut [f32]) {
assert_eq!(input.len(), output.len());
let (in_chunks, in_remainder) = input.as_chunks::<8>();
let (out_chunks, out_remainder) = output.as_chunks_mut::<8>();
for (inp, out) in in_chunks.iter().zip(out_chunks.iter_mut()) {
// Decode all 8 channels (2 pixels), then overwrite the two alpha
// slots with plain linear scaling — alpha is not gamma-encoded.
*out = crate::scalar::srgb_u8_to_linear_x8(*inp);
out[3] = inp[3] as f32 / 255.0;
out[7] = inp[7] as f32 / 255.0;
}
// Tail: per-pixel decode; trailing non-pixel elements stay untouched.
let in_rem_pixels = in_remainder.chunks_exact(4);
let out_rem_pixels = out_remainder.chunks_exact_mut(4);
for (inp, out) in in_rem_pixels.zip(out_rem_pixels) {
out[0] = crate::scalar::srgb_u8_to_linear(inp[0]);
out[1] = crate::scalar::srgb_u8_to_linear(inp[1]);
out[2] = crate::scalar::srgb_u8_to_linear(inp[2]);
out[3] = inp[3] as f32 / 255.0;
}
}
/// Encode interleaved RGBA linear f32 pixels to sRGB u8: RGB through the
/// 4096-entry LUT, alpha linearly scaled and rounded to u8.
#[archmage::magetypes(define(f32x16), v4(cfg(avx512)), v3, neon, wasm128, scalar)]
fn linear_to_srgb_u8_rgba_slice_tier(token: Token, input: &[f32], output: &mut [u8]) {
let lut = crate::const_luts::linear_to_srgb_u8();
let (in_chunks, in_rem) = input.as_chunks::<16>();
let (out_chunks, out_rem) = output.as_chunks_mut::<16>();
for (inp, out) in in_chunks.iter().zip(out_chunks.iter_mut()) {
let v = f32x16::from_array(token, *inp);
let clamped = v.max(f32x16::zero(token)).min(f32x16::splat(token, 1.0));
// Round-to-nearest LUT index in [0, 4095] for every lane.
let idx_f = clamped.mul_add(f32x16::splat(token, 4095.0), f32x16::splat(token, 0.5));
let idx_i = idx_f.to_i32().to_array();
// Per pixel: RGB via LUT, alpha scaled directly from the f32 input.
for px in 0..4 {
let base = px * 4;
out[base] = lut[(idx_i[base] as usize) & 0xFFF];
out[base + 1] = lut[(idx_i[base + 1] as usize) & 0xFFF];
out[base + 2] = lut[(idx_i[base + 2] as usize) & 0xFFF];
out[base + 3] = (inp[base + 3].clamp(0.0, 1.0) * 255.0 + 0.5) as u8;
}
}
for (inp, out) in in_rem.chunks_exact(4).zip(out_rem.chunks_exact_mut(4)) {
for i in 0..3 {
let clamped = inp[i].clamp(0.0, 1.0);
let idx = (clamped * 4095.0 + 0.5) as usize & 0xFFF;
out[i] = lut[idx];
}
out[3] = (inp[3].clamp(0.0, 1.0) * 255.0 + 0.5) as u8;
}
}
/// Encode interleaved RGBA linear f32 pixels to sRGB u8 (alpha scaled
/// linearly), dispatching across the listed SIMD tiers.
#[inline]
pub fn linear_to_srgb_u8_rgba_slice(input: &[f32], output: &mut [u8]) {
assert_eq!(input.len(), output.len());
incant!(
linear_to_srgb_u8_rgba_slice_tier(input, output),
[v4, v3, neon, wasm128, scalar]
)
}
/// Decode interleaved RGBA u8 pixels to linear f32 and premultiply RGB by
/// alpha; the alpha channel is stored as `a / 255.0`. Slices must be
/// equal length; trailing elements that do not form a pixel are untouched.
pub fn srgb_u8_to_linear_premultiply_rgba_slice(input: &[u8], output: &mut [f32]) {
    assert_eq!(input.len(), output.len());
    let (in_chunks, in_remainder) = input.as_chunks::<8>();
    let (out_chunks, out_remainder) = output.as_chunks_mut::<8>();
    for (inp, out) in in_chunks.iter().zip(out_chunks.iter_mut()) {
        // Decode all 8 channels (two RGBA pixels) in one go...
        *out = crate::scalar::srgb_u8_to_linear_x8(*inp);
        // ...then premultiply each pixel's RGB and overwrite its alpha slot.
        for px in 0..2 {
            let base = px * 4;
            let alpha = inp[base + 3] as f32 / 255.0;
            out[base] *= alpha;
            out[base + 1] *= alpha;
            out[base + 2] *= alpha;
            out[base + 3] = alpha;
        }
    }
    let tail_in = in_remainder.chunks_exact(4);
    let tail_out = out_remainder.chunks_exact_mut(4);
    for (inp, out) in tail_in.zip(tail_out) {
        let alpha = inp[3] as f32 / 255.0;
        out[0] = crate::scalar::srgb_u8_to_linear(inp[0]) * alpha;
        out[1] = crate::scalar::srgb_u8_to_linear(inp[1]) * alpha;
        out[2] = crate::scalar::srgb_u8_to_linear(inp[2]) * alpha;
        out[3] = alpha;
    }
}
/// Reciprocal of `a`, or 0.0 when `a` is at or below the un-premultiply
/// threshold (treats (near-)transparent pixels as fully transparent
/// instead of amplifying noise by a huge 1/alpha).
#[inline(always)]
fn inv_alpha_or_zero(a: f32) -> f32 {
    match a > crate::UNPREMUL_ALPHA_THRESHOLD {
        true => a.recip(),
        false => 0.0,
    }
}
/// Un-premultiply interleaved RGBA linear f32 pixels and encode them to
/// sRGB u8 via the 4096-entry LUT; alpha is scaled linearly to u8.
#[allow(clippy::needless_range_loop)]
#[archmage::magetypes(define(f32x16), v4(cfg(avx512)), v3, neon, wasm128, scalar)]
fn unpremultiply_linear_to_srgb_u8_rgba_slice_tier(token: Token, input: &[f32], output: &mut [u8]) {
let lut = crate::const_luts::linear_to_srgb_u8();
let (in_chunks, in_rem) = input.as_chunks::<16>();
let (out_chunks, out_rem) = output.as_chunks_mut::<16>();
for (inp, out) in in_chunks.iter().zip(out_chunks.iter_mut()) {
// Alpha of each of the 4 pixels, and its guarded reciprocal.
let a = [inp[3], inp[7], inp[11], inp[15]];
let inv_a = [
inv_alpha_or_zero(a[0]),
inv_alpha_or_zero(a[1]),
inv_alpha_or_zero(a[2]),
inv_alpha_or_zero(a[3]),
];
// RGBA x4 lane layout: RGB lanes scale by 1/alpha, alpha lanes by 1.0.
let mul_arr = [
inv_a[0], inv_a[0], inv_a[0], 1.0, inv_a[1], inv_a[1], inv_a[1], 1.0, inv_a[2],
inv_a[2], inv_a[2], 1.0, inv_a[3], inv_a[3], inv_a[3], 1.0,
];
let mul_vec = f32x16::from_array(token, mul_arr);
let v = f32x16::from_array(token, *inp);
let unpremul = v * mul_vec;
let clamped = unpremul
.max(f32x16::zero(token))
.min(f32x16::splat(token, 1.0));
// Round-to-nearest LUT index in [0, 4095].
let idx_f = clamped.mul_add(f32x16::splat(token, 4095.0), f32x16::splat(token, 0.5));
let idx_i = idx_f.to_i32().to_array();
for px in 0..4 {
let base = px * 4;
out[base] = lut[(idx_i[base] as usize) & 0xFFF];
out[base + 1] = lut[(idx_i[base + 1] as usize) & 0xFFF];
out[base + 2] = lut[(idx_i[base + 2] as usize) & 0xFFF];
// Alpha comes from the original (pre-unpremultiply) value.
out[base + 3] = (a[px].clamp(0.0, 1.0) * 255.0 + 0.5) as u8;
}
}
// Tail: scalar per-pixel un-premultiply + LUT encode.
for (inp, out) in in_rem.chunks_exact(4).zip(out_rem.chunks_exact_mut(4)) {
let a = inp[3];
let inv_a = inv_alpha_or_zero(a);
for i in 0..3 {
let clamped = (inp[i] * inv_a).clamp(0.0, 1.0);
let idx = (clamped * 4095.0 + 0.5) as usize & 0xFFF;
out[i] = lut[idx];
}
out[3] = (a.clamp(0.0, 1.0) * 255.0 + 0.5) as u8;
}
}
/// Un-premultiply RGBA linear f32 pixels and encode to sRGB u8,
/// dispatching across the listed SIMD tiers.
#[inline]
pub fn unpremultiply_linear_to_srgb_u8_rgba_slice(input: &[f32], output: &mut [u8]) {
assert_eq!(input.len(), output.len());
incant!(
unpremultiply_linear_to_srgb_u8_rgba_slice_tier(input, output),
[v4, v3, neon, wasm128, scalar]
)
}
pub fn srgb_u16_to_linear_rgba_slice(input: &[u16], output: &mut [f32]) {
assert_eq!(input.len(), output.len());
let in_pixels = input.chunks_exact(4);
let out_pixels = output.chunks_exact_mut(4);
for (inp, out) in in_pixels.zip(out_pixels) {
out[0] = crate::scalar::srgb_u16_to_linear(inp[0]);
out[1] = crate::scalar::srgb_u16_to_linear(inp[1]);
out[2] = crate::scalar::srgb_u16_to_linear(inp[2]);
out[3] = inp[3] as f32 / 65535.0;
}
}
/// Encode interleaved RGBA linear f32 pixels to sRGB u16 using the
/// rational-polynomial encoder; alpha is scaled linearly to u16.
#[archmage::magetypes(define(f32x16), v4(cfg(avx512)), v3, neon, wasm128, scalar)]
fn linear_to_srgb_u16_rgba_slice_tier(token: Token, input: &[f32], output: &mut [u16]) {
use crate::rational_poly::{L2S_P, L2S_Q, LINEAR_THRESHOLD};
let (in_chunks, in_rem) = input.as_chunks::<16>();
let (out_chunks, out_rem) = output.as_chunks_mut::<16>();
for (inp, out) in in_chunks.iter().zip(out_chunks.iter_mut()) {
let linear = f32x16::from_array(token, *inp);
let zero = f32x16::zero(token);
let one = f32x16::splat(token, 1.0);
let clamped = linear.max(zero).min(one);
// Linear segment below the threshold: x * 12.92.
let linear_result = clamped * f32x16::splat(token, 12.92);
// Rational approximation evaluated in sqrt-space (Horner, FMA).
let x = clamped.sqrt();
let yp = f32x16::splat(token, L2S_P[4]).mul_add(x, f32x16::splat(token, L2S_P[3]));
let yp = yp.mul_add(x, f32x16::splat(token, L2S_P[2]));
let yp = yp.mul_add(x, f32x16::splat(token, L2S_P[1]));
let yp = yp.mul_add(x, f32x16::splat(token, L2S_P[0]));
let yq = f32x16::splat(token, L2S_Q[4]).mul_add(x, f32x16::splat(token, L2S_Q[3]));
let yq = yq.mul_add(x, f32x16::splat(token, L2S_Q[2]));
let yq = yq.mul_add(x, f32x16::splat(token, L2S_Q[1]));
let yq = yq.mul_add(x, f32x16::splat(token, L2S_Q[0]));
let power_result = (yp / yq).min(one);
let thresh_mask = clamped.simd_lt(f32x16::splat(token, LINEAR_THRESHOLD));
let srgb = f32x16::blend(thresh_mask, linear_result, power_result);
// Inputs at or above 1.0 encode to exactly 1.0 (65535).
let ge_one = linear.simd_ge(one);
let srgb = f32x16::blend(ge_one, one, srgb);
// Round-to-nearest u16 via scale + 0.5 before the int conversion.
let scaled = srgb.mul_add(f32x16::splat(token, 65535.0), f32x16::splat(token, 0.5));
let idx_i = scaled.to_i32().to_array();
// Per pixel: RGB from the SIMD result, alpha scaled from the input.
for px in 0..4 {
let base = px * 4;
out[base] = idx_i[base] as u16;
out[base + 1] = idx_i[base + 1] as u16;
out[base + 2] = idx_i[base + 2] as u16;
out[base + 3] = (inp[base + 3].clamp(0.0, 1.0) * 65535.0 + 0.5) as u16;
}
}
for (inp, out) in in_rem.chunks_exact(4).zip(out_rem.chunks_exact_mut(4)) {
out[0] = crate::scalar::linear_to_srgb_u16(inp[0]);
out[1] = crate::scalar::linear_to_srgb_u16(inp[1]);
out[2] = crate::scalar::linear_to_srgb_u16(inp[2]);
out[3] = (inp[3].clamp(0.0, 1.0) * 65535.0 + 0.5) as u16;
}
}
/// Encode interleaved RGBA linear f32 pixels to sRGB u16 (alpha scaled
/// linearly), dispatching across the listed SIMD tiers.
#[inline]
pub fn linear_to_srgb_u16_rgba_slice(input: &[f32], output: &mut [u16]) {
assert_eq!(input.len(), output.len());
incant!(
linear_to_srgb_u16_rgba_slice_tier(input, output),
[v4, v3, neon, wasm128, scalar]
)
}
/// Fast RGBA linear → sRGB u16 via the sqrt-indexed encode LUT (std-only);
/// alpha is scaled linearly to u16.
#[cfg(feature = "std")]
#[archmage::magetypes(define(f32x16), v4(cfg(avx512)), v3, neon, wasm128, scalar)]
fn linear_to_srgb_u16_rgba_slice_fast_tier(token: Token, input: &[f32], output: &mut [u16]) {
let lut = crate::u16_lut::encode_lut();
let max_idx = crate::u16_lut::ENCODE_LUT_N - 1;
let scale = crate::u16_lut::ENCODE_SQRT_SCALE;
let (in_chunks, in_rem) = input.as_chunks::<16>();
let (out_chunks, out_rem) = output.as_chunks_mut::<16>();
for (inp, out) in in_chunks.iter().zip(out_chunks.iter_mut()) {
let v = f32x16::from_array(token, *inp);
let clamped = v.max(f32x16::zero(token)).min(f32x16::splat(token, 1.0));
// Index the LUT by sqrt(x) (denser sampling near 0), round-to-nearest.
let idx_f = clamped
.sqrt()
.mul_add(f32x16::splat(token, scale), f32x16::splat(token, 0.5));
let idx_i = idx_f.to_i32().to_array();
// Per pixel: RGB via LUT, alpha scaled directly from the f32 input.
for px in 0..4 {
let base = px * 4;
out[base] = lut[(idx_i[base] as usize).min(max_idx)];
out[base + 1] = lut[(idx_i[base + 1] as usize).min(max_idx)];
out[base + 2] = lut[(idx_i[base + 2] as usize).min(max_idx)];
out[base + 3] = (inp[base + 3].clamp(0.0, 1.0) * 65535.0 + 0.5) as u16;
}
}
for (inp, out) in in_rem.chunks_exact(4).zip(out_rem.chunks_exact_mut(4)) {
out[0] = crate::scalar::linear_to_srgb_u16_fast(inp[0]);
out[1] = crate::scalar::linear_to_srgb_u16_fast(inp[1]);
out[2] = crate::scalar::linear_to_srgb_u16_fast(inp[2]);
out[3] = (inp[3].clamp(0.0, 1.0) * 65535.0 + 0.5) as u16;
}
}
/// Fast RGBA linear → sRGB u16 encode. With `std`, dispatches to the
/// LUT-based SIMD tiers; without `std`, uses the scalar fast path.
#[inline]
pub fn linear_to_srgb_u16_rgba_slice_fast(input: &[f32], output: &mut [u16]) {
assert_eq!(input.len(), output.len());
#[cfg(feature = "std")]
{
incant!(
linear_to_srgb_u16_rgba_slice_fast_tier(input, output),
[v4, v3, neon, wasm128, scalar]
)
}
#[cfg(not(feature = "std"))]
{
let in_pixels = input.chunks_exact(4);
let out_pixels = output.chunks_exact_mut(4);
for (inp, out) in in_pixels.zip(out_pixels) {
out[0] = crate::scalar::linear_to_srgb_u16_fast(inp[0]);
out[1] = crate::scalar::linear_to_srgb_u16_fast(inp[1]);
out[2] = crate::scalar::linear_to_srgb_u16_fast(inp[2]);
out[3] = (inp[3].clamp(0.0, 1.0) * 65535.0 + 0.5) as u16;
}
}
}
#[archmage::magetypes(define(f32x16), v4(cfg(avx512)), v3, neon, wasm128, scalar)]
/// In-place gamma decode: clamp each value to [0, 1] and raise it to
/// `gamma`. Full 16-wide chunks use SIMD, the tail the scalar path.
fn gamma_to_linear_slice_tier(token: Token, values: &mut [f32], gamma: f32) {
    let (body, tail) = values.as_chunks_mut::<16>();
    for block in body.iter_mut() {
        let v = f32x16::from_array(token, *block);
        let clamped = v.max(f32x16::zero(token)).min(f32x16::splat(token, 1.0));
        *block = clamped.pow_midp(gamma).to_array();
    }
    for value in tail.iter_mut() {
        *value = crate::scalar::gamma_to_linear(*value, gamma);
    }
}
/// Decode a flat f32 slice from a simple gamma curve to linear in place,
/// dispatching across the listed SIMD tiers.
#[inline]
pub fn gamma_to_linear_slice(values: &mut [f32], gamma: f32) {
incant!(
gamma_to_linear_slice_tier(values, gamma),
[v4, v3, neon, wasm128, scalar]
)
}
#[archmage::magetypes(define(f32x16), v4(cfg(avx512)), v3, neon, wasm128, scalar)]
/// In-place gamma encode: clamp each value to [0, 1] and raise it to
/// `1/gamma`. Full 16-wide chunks use SIMD, the tail the scalar path.
fn linear_to_gamma_slice_tier(token: Token, values: &mut [f32], gamma: f32) {
    let (body, tail) = values.as_chunks_mut::<16>();
    for block in body.iter_mut() {
        let v = f32x16::from_array(token, *block);
        let clamped = v.max(f32x16::zero(token)).min(f32x16::splat(token, 1.0));
        *block = clamped.pow_midp(1.0 / gamma).to_array();
    }
    for value in tail.iter_mut() {
        *value = crate::scalar::linear_to_gamma(*value, gamma);
    }
}
/// Encode a flat f32 slice from linear to a simple gamma curve in place,
/// dispatching across the listed SIMD tiers.
#[inline]
pub fn linear_to_gamma_slice(values: &mut [f32], gamma: f32) {
incant!(
linear_to_gamma_slice_tier(values, gamma),
[v4, v3, neon, wasm128, scalar]
)
}
/// Gamma-decode interleaved RGBA pixels and premultiply RGB by alpha, in
/// place. Alpha channels themselves are left unchanged.
#[archmage::magetypes(define(f32x16), v4(cfg(avx512)), v3, neon, wasm128, scalar)]
fn gamma_to_linear_premultiply_rgba_slice_tier(token: Token, values: &mut [f32], gamma: f32) {
let (chunks, remainder) = values.as_chunks_mut::<16>();
for chunk in chunks {
// Save the 4 alpha lanes before the x16 kernel touches them.
let a = [chunk[3], chunk[7], chunk[11], chunk[15]];
*chunk = incant!(mt_gamma_to_linear(*chunk, gamma));
let v = f32x16::from_array(token, *chunk);
// RGBA x4 lane layout: multiply RGB lanes by alpha, alpha lanes by 1.0.
let alpha = f32x16::from_array(
token,
[
a[0], a[0], a[0], 1.0, a[1], a[1], a[1], 1.0, a[2], a[2], a[2], 1.0, a[3], a[3],
a[3], 1.0,
],
);
*chunk = (v * alpha).to_array();
// Restore the original alpha values.
[chunk[3], chunk[7], chunk[11], chunk[15]] = a;
}
// Tail: per-pixel scalar decode + premultiply; alpha stays as-is.
for pixel in remainder.chunks_exact_mut(4) {
let a = pixel[3];
pixel[0] = crate::scalar::gamma_to_linear(pixel[0], gamma) * a;
pixel[1] = crate::scalar::gamma_to_linear(pixel[1], gamma) * a;
pixel[2] = crate::scalar::gamma_to_linear(pixel[2], gamma) * a;
}
}
/// Gamma-decode interleaved RGBA f32 pixels and premultiply by alpha, in
/// place, dispatching across the listed SIMD tiers.
#[deprecated(
since = "0.6.4",
note = "use srgb_to_linear_premultiply_rgba_slice instead; gamma-based premultiply will be removed in a future release"
)]
#[inline]
pub fn gamma_to_linear_premultiply_rgba_slice(values: &mut [f32], gamma: f32) {
incant!(
gamma_to_linear_premultiply_rgba_slice_tier(values, gamma),
[v4, v3, neon, wasm128, scalar]
)
}
#[archmage::magetypes(define(f32x16), v4(cfg(avx512)), v3, neon, wasm128, scalar)]
/// Un-premultiply interleaved RGBA pixels (divide RGB by alpha), then
/// gamma-encode RGB, in place. Alpha channels pass through unchanged;
/// pixels whose alpha is at or below `crate::UNPREMUL_ALPHA_THRESHOLD`
/// get their RGB forced to 0 instead of being scaled by a huge 1/alpha.
fn unpremultiply_linear_to_gamma_rgba_slice_tier(token: Token, values: &mut [f32], gamma: f32) {
    // Reciprocal alpha, or 0.0 for (near-)transparent pixels. Local
    // closure replaces the previous 20-line hand-expanded table.
    let inv_or_zero = |a: f32| {
        if a > crate::UNPREMUL_ALPHA_THRESHOLD {
            1.0 / a
        } else {
            0.0
        }
    };
    let (chunks, remainder) = values.as_chunks_mut::<16>();
    for chunk in chunks {
        // Save the 4 alpha lanes before the x16 kernel touches them.
        let a = [chunk[3], chunk[7], chunk[11], chunk[15]];
        let inv = a.map(inv_or_zero);
        let v = f32x16::from_array(token, *chunk);
        // RGBA x4 lane layout: RGB lanes scale by 1/alpha, alpha lanes by 1.0.
        let inv_alpha = f32x16::from_array(
            token,
            [
                inv[0], inv[0], inv[0], 1.0, inv[1], inv[1], inv[1], 1.0, inv[2], inv[2],
                inv[2], 1.0, inv[3], inv[3], inv[3], 1.0,
            ],
        );
        let unpremul = (v * inv_alpha).to_array();
        *chunk = incant!(mt_linear_to_gamma(unpremul, gamma));
        // Restore the original alpha values.
        [chunk[3], chunk[7], chunk[11], chunk[15]] = a;
    }
    // Tail: scalar per-pixel un-premultiply + encode; alpha stays as-is.
    for pixel in remainder.chunks_exact_mut(4) {
        let a = pixel[3];
        if a > crate::UNPREMUL_ALPHA_THRESHOLD {
            let inv_a = 1.0 / a;
            pixel[0] = crate::scalar::linear_to_gamma(pixel[0] * inv_a, gamma);
            pixel[1] = crate::scalar::linear_to_gamma(pixel[1] * inv_a, gamma);
            pixel[2] = crate::scalar::linear_to_gamma(pixel[2] * inv_a, gamma);
        } else {
            pixel[0] = 0.0;
            pixel[1] = 0.0;
            pixel[2] = 0.0;
        }
    }
}
/// Un-premultiply interleaved RGBA f32 pixels and gamma-encode RGB, in
/// place, dispatching across the listed SIMD tiers.
#[deprecated(
since = "0.6.4",
note = "use unpremultiply_linear_to_srgb_rgba_slice instead; gamma-based unpremultiply will be removed in a future release"
)]
#[inline]
pub fn unpremultiply_linear_to_gamma_rgba_slice(values: &mut [f32], gamma: f32) {
incant!(
unpremultiply_linear_to_gamma_rgba_slice_tier(values, gamma),
[v4, v3, neon, wasm128, scalar]
)
}
#[cfg(feature = "transfer")]
#[archmage::magetypes(define(f32x16), v4(cfg(avx512)), v3, neon, wasm128, scalar)]
/// BT.709 → linear over a slice, in place: full 16-wide chunks use the
/// x16 kernel, the tail the scalar conversion.
fn bt709_to_linear_slice_tier(token: Token, values: &mut [f32]) {
    let (body, tail) = values.as_chunks_mut::<16>();
    for block in body.iter_mut() {
        let v = f32x16::from_array(token, *block);
        *block = crate::tf::bt709::bt709_to_linear_x16(token, v).to_array();
    }
    for value in tail.iter_mut() {
        *value = crate::tf::bt709_to_linear(*value);
    }
}
/// Convert a flat f32 slice from BT.709 encoding to linear in place,
/// dispatching across the listed SIMD tiers.
#[cfg(feature = "transfer")]
#[inline]
pub fn bt709_to_linear_slice(values: &mut [f32]) {
incant!(
bt709_to_linear_slice_tier(values),
[v4, v3, neon, wasm128, scalar]
)
}
#[cfg(feature = "transfer")]
#[archmage::magetypes(define(f32x16), v4(cfg(avx512)), v3, neon, wasm128, scalar)]
/// Linear → BT.709 over a slice, in place: full 16-wide chunks use the
/// x16 kernel, the tail the scalar conversion.
fn linear_to_bt709_slice_tier(token: Token, values: &mut [f32]) {
    let (body, tail) = values.as_chunks_mut::<16>();
    for block in body.iter_mut() {
        let v = f32x16::from_array(token, *block);
        *block = crate::tf::bt709::linear_to_bt709_x16(token, v).to_array();
    }
    for value in tail.iter_mut() {
        *value = crate::tf::linear_to_bt709(*value);
    }
}
/// Convert a flat f32 slice from linear to BT.709 encoding in place,
/// dispatching across the listed SIMD tiers.
#[cfg(feature = "transfer")]
#[inline]
pub fn linear_to_bt709_slice(values: &mut [f32]) {
incant!(
linear_to_bt709_slice_tier(values),
[v4, v3, neon, wasm128, scalar]
)
}
#[cfg(feature = "transfer")]
#[archmage::magetypes(define(f32x16), v4(cfg(avx512)), v3, neon, wasm128, scalar)]
/// PQ → linear over a slice, in place: full 16-wide chunks use the x16
/// kernel, the tail the scalar conversion.
fn pq_to_linear_slice_tier(token: Token, values: &mut [f32]) {
    let (body, tail) = values.as_chunks_mut::<16>();
    for block in body.iter_mut() {
        let v = f32x16::from_array(token, *block);
        *block = crate::tf::pq::pq_to_linear_x16(token, v).to_array();
    }
    for value in tail.iter_mut() {
        *value = crate::tf::pq_to_linear(*value);
    }
}
/// Convert a flat f32 slice from PQ encoding to linear in place,
/// dispatching across the listed SIMD tiers.
#[cfg(feature = "transfer")]
#[inline]
pub fn pq_to_linear_slice(values: &mut [f32]) {
incant!(
pq_to_linear_slice_tier(values),
[v4, v3, neon, wasm128, scalar]
)
}
#[cfg(feature = "transfer")]
#[archmage::magetypes(define(f32x16), v4(cfg(avx512)), v3, neon, wasm128, scalar)]
/// Linear → PQ over a slice, in place: full 16-wide chunks use the x16
/// kernel, the tail the scalar conversion.
fn linear_to_pq_slice_tier(token: Token, values: &mut [f32]) {
    let (body, tail) = values.as_chunks_mut::<16>();
    for block in body.iter_mut() {
        let v = f32x16::from_array(token, *block);
        *block = crate::tf::pq::linear_to_pq_x16(token, v).to_array();
    }
    for value in tail.iter_mut() {
        *value = crate::tf::linear_to_pq(*value);
    }
}
#[cfg(feature = "transfer")]
#[inline]
/// Convert linear f32 values to PQ (ST 2084) signal in-place.
///
/// Dispatches via `incant!` across the v4 (AVX-512) / v3 / NEON /
/// WASM SIMD128 / scalar tiers generated for `linear_to_pq_slice_tier`.
pub fn linear_to_pq_slice(values: &mut [f32]) {
incant!(
linear_to_pq_slice_tier(values),
[v4, v3, neon, wasm128, scalar]
)
}
#[cfg(feature = "transfer")]
#[archmage::magetypes(define(f32x16), v4(cfg(avx512)), v3, neon, wasm128, scalar)]
fn hlg_to_linear_slice_tier(token: Token, values: &mut [f32]) {
    // Decode full 16-lane groups with the vectorized HLG kernel.
    let (vector_groups, tail) = values.as_chunks_mut::<16>();
    for group in vector_groups.iter_mut() {
        let lanes = f32x16::from_array(token, *group);
        *group = crate::tf::hlg::hlg_to_linear_x16(token, lanes).to_array();
    }
    // Remaining samples (< 16) go through the scalar conversion.
    tail.iter_mut()
        .for_each(|sample| *sample = crate::tf::hlg_to_linear(*sample));
}
#[cfg(feature = "transfer")]
#[inline]
/// Convert HLG (ARIB STD-B67) signal f32 values to linear in-place.
///
/// Dispatches via `incant!` across the v4 (AVX-512) / v3 / NEON /
/// WASM SIMD128 / scalar tiers generated for `hlg_to_linear_slice_tier`.
pub fn hlg_to_linear_slice(values: &mut [f32]) {
incant!(
hlg_to_linear_slice_tier(values),
[v4, v3, neon, wasm128, scalar]
)
}
#[cfg(feature = "transfer")]
#[archmage::magetypes(define(f32x16), v4(cfg(avx512)), v3, neon, wasm128, scalar)]
fn linear_to_hlg_slice_tier(token: Token, values: &mut [f32]) {
    // Encode 16 linear samples per iteration with the SIMD HLG kernel.
    let (vector_groups, tail) = values.as_chunks_mut::<16>();
    for group in vector_groups.iter_mut() {
        let lanes = f32x16::from_array(token, *group);
        *group = crate::tf::hlg::linear_to_hlg_x16(token, lanes).to_array();
    }
    // Scalar fallback for the trailing (< 16) samples.
    tail.iter_mut()
        .for_each(|sample| *sample = crate::tf::linear_to_hlg(*sample));
}
#[cfg(feature = "transfer")]
#[inline]
/// Convert linear f32 values to HLG (ARIB STD-B67) signal in-place.
///
/// Dispatches via `incant!` across the v4 (AVX-512) / v3 / NEON /
/// WASM SIMD128 / scalar tiers generated for `linear_to_hlg_slice_tier`.
pub fn linear_to_hlg_slice(values: &mut [f32]) {
incant!(
linear_to_hlg_slice_tier(values),
[v4, v3, neon, wasm128, scalar]
)
}
#[cfg(feature = "transfer")]
// Generates an alpha-preserving RGBA slice converter for one transfer
// function: a public dispatcher plus one hand-written tier per SIMD level.
// RGB channels run through the wide `$x16_v4`/`$x8_v3`/`$x4_*` kernels;
// alpha lanes are saved before each SIMD call and restored afterwards, so
// every 4th element is passed through untouched.
macro_rules! tf_rgba_slice_dispatcher {
(
pub = $pub_name:ident,
tier_base = $tier_base:ident,
tier_v4 = $tier_v4:ident,
tier_v3 = $tier_v3:ident,
tier_neon = $tier_neon:ident,
tier_wasm128 = $tier_wasm128:ident,
tier_scalar = $tier_scalar:ident,
scalar = $scalar:path,
x16_v4 = $x16_v4:path,
x8_v3 = $x8_v3:path,
x4_neon = $x4_neon:path,
x4_wasm128 = $x4_wasm128:path,
doc = $doc:expr,
) => {
// AVX-512 tier: 4 RGBA pixels (16 lanes) per iteration.
#[cfg(feature = "avx512")]
#[arcane]
fn $tier_v4(token: X64V4Token, values: &mut [f32]) {
let (chunks, remainder) = values.as_chunks_mut::<16>();
for chunk in chunks {
// Save the four alpha lanes, convert all 16 lanes, then restore
// alpha via destructuring assignment.
let a = [chunk[3], chunk[7], chunk[11], chunk[15]];
*chunk = $x16_v4(token, *chunk);
[chunk[3], chunk[7], chunk[11], chunk[15]] = a;
}
// Up to 3 complete RGBA pixels can remain (< 16 elements); convert
// their RGB channels scalarly, leaving alpha (and any trailing
// partial pixel) untouched.
for pixel in remainder.chunks_exact_mut(4) {
pixel[0] = $scalar(pixel[0]);
pixel[1] = $scalar(pixel[1]);
pixel[2] = $scalar(pixel[2]);
}
}
// x86-64-v3 tier: 2 RGBA pixels (8 lanes) per iteration.
#[arcane]
fn $tier_v3(token: X64V3Token, values: &mut [f32]) {
let (chunks, remainder) = values.as_chunks_mut::<8>();
for chunk in chunks {
let a = [chunk[3], chunk[7]];
*chunk = $x8_v3(token, *chunk);
[chunk[3], chunk[7]] = a;
}
// At most 1 complete pixel can remain (< 8 elements).
for pixel in remainder.chunks_exact_mut(4) {
pixel[0] = $scalar(pixel[0]);
pixel[1] = $scalar(pixel[1]);
pixel[2] = $scalar(pixel[2]);
}
}
// NEON tier: exactly one RGBA pixel (4 lanes) per iteration. A remainder
// shorter than 4 cannot hold a complete pixel, so it is left as-is.
#[arcane]
fn $tier_neon(token: NeonToken, values: &mut [f32]) {
let (chunks, _remainder) = values.as_chunks_mut::<4>();
for chunk in chunks {
let a = chunk[3];
*chunk = $x4_neon(token, *chunk);
chunk[3] = a;
}
}
// WASM SIMD128 tier: same one-pixel-per-vector scheme as NEON.
#[arcane]
fn $tier_wasm128(token: Wasm128Token, values: &mut [f32]) {
let (chunks, _remainder) = values.as_chunks_mut::<4>();
for chunk in chunks {
let a = chunk[3];
*chunk = $x4_wasm128(token, *chunk);
chunk[3] = a;
}
}
// Scalar tier: per-pixel loop converting only the RGB channels.
fn $tier_scalar(_token: ScalarToken, values: &mut [f32]) {
for pixel in values.chunks_exact_mut(4) {
pixel[0] = $scalar(pixel[0]);
pixel[1] = $scalar(pixel[1]);
pixel[2] = $scalar(pixel[2]);
}
}
// Public entry point: `incant!` selects among the generated tiers.
#[doc = $doc]
#[inline]
pub fn $pub_name(values: &mut [f32]) {
incant!($tier_base(values), [v4, v3, neon, wasm128, scalar])
}
};
}
// Instantiate the alpha-preserving RGBA slice dispatchers for each supported
// transfer function (BT.709, PQ, HLG), pairing the scalar fallback with the
// matching x16/x8/x4 SIMD kernels.
#[cfg(feature = "transfer")]
tf_rgba_slice_dispatcher! {
pub = bt709_to_linear_rgba_slice,
tier_base = bt709_to_linear_rgba_slice_tier,
tier_v4 = bt709_to_linear_rgba_slice_tier_v4,
tier_v3 = bt709_to_linear_rgba_slice_tier_v3,
tier_neon = bt709_to_linear_rgba_slice_tier_neon,
tier_wasm128 = bt709_to_linear_rgba_slice_tier_wasm128,
tier_scalar = bt709_to_linear_rgba_slice_tier_scalar,
scalar = crate::tf::bt709_to_linear,
x16_v4 = crate::tokens::x16::bt709_to_linear_v4,
x8_v3 = crate::tokens::x8::bt709_to_linear_v3,
x4_neon = crate::tokens::x4::bt709_to_linear_neon,
x4_wasm128 = crate::tokens::x4::bt709_to_linear_wasm128,
doc = "Convert BT.709 signal RGBA f32 values to linear in-place, preserving alpha.",
}
#[cfg(feature = "transfer")]
tf_rgba_slice_dispatcher! {
pub = linear_to_bt709_rgba_slice,
tier_base = linear_to_bt709_rgba_slice_tier,
tier_v4 = linear_to_bt709_rgba_slice_tier_v4,
tier_v3 = linear_to_bt709_rgba_slice_tier_v3,
tier_neon = linear_to_bt709_rgba_slice_tier_neon,
tier_wasm128 = linear_to_bt709_rgba_slice_tier_wasm128,
tier_scalar = linear_to_bt709_rgba_slice_tier_scalar,
scalar = crate::tf::linear_to_bt709,
x16_v4 = crate::tokens::x16::linear_to_bt709_v4,
x8_v3 = crate::tokens::x8::linear_to_bt709_v3,
x4_neon = crate::tokens::x4::linear_to_bt709_neon,
x4_wasm128 = crate::tokens::x4::linear_to_bt709_wasm128,
doc = "Convert linear RGBA f32 values to BT.709 signal in-place, preserving alpha.",
}
#[cfg(feature = "transfer")]
tf_rgba_slice_dispatcher! {
pub = pq_to_linear_rgba_slice,
tier_base = pq_to_linear_rgba_slice_tier,
tier_v4 = pq_to_linear_rgba_slice_tier_v4,
tier_v3 = pq_to_linear_rgba_slice_tier_v3,
tier_neon = pq_to_linear_rgba_slice_tier_neon,
tier_wasm128 = pq_to_linear_rgba_slice_tier_wasm128,
tier_scalar = pq_to_linear_rgba_slice_tier_scalar,
scalar = crate::tf::pq_to_linear,
x16_v4 = crate::tokens::x16::pq_to_linear_v4,
x8_v3 = crate::tokens::x8::pq_to_linear_v3,
x4_neon = crate::tokens::x4::pq_to_linear_neon,
x4_wasm128 = crate::tokens::x4::pq_to_linear_wasm128,
doc = "Convert PQ (ST 2084) signal RGBA f32 values to linear in-place, preserving alpha.",
}
#[cfg(feature = "transfer")]
tf_rgba_slice_dispatcher! {
pub = linear_to_pq_rgba_slice,
tier_base = linear_to_pq_rgba_slice_tier,
tier_v4 = linear_to_pq_rgba_slice_tier_v4,
tier_v3 = linear_to_pq_rgba_slice_tier_v3,
tier_neon = linear_to_pq_rgba_slice_tier_neon,
tier_wasm128 = linear_to_pq_rgba_slice_tier_wasm128,
tier_scalar = linear_to_pq_rgba_slice_tier_scalar,
scalar = crate::tf::linear_to_pq,
x16_v4 = crate::tokens::x16::linear_to_pq_v4,
x8_v3 = crate::tokens::x8::linear_to_pq_v3,
x4_neon = crate::tokens::x4::linear_to_pq_neon,
x4_wasm128 = crate::tokens::x4::linear_to_pq_wasm128,
doc = "Convert linear RGBA f32 values to PQ (ST 2084) signal in-place, preserving alpha.",
}
#[cfg(feature = "transfer")]
tf_rgba_slice_dispatcher! {
pub = hlg_to_linear_rgba_slice,
tier_base = hlg_to_linear_rgba_slice_tier,
tier_v4 = hlg_to_linear_rgba_slice_tier_v4,
tier_v3 = hlg_to_linear_rgba_slice_tier_v3,
tier_neon = hlg_to_linear_rgba_slice_tier_neon,
tier_wasm128 = hlg_to_linear_rgba_slice_tier_wasm128,
tier_scalar = hlg_to_linear_rgba_slice_tier_scalar,
scalar = crate::tf::hlg_to_linear,
x16_v4 = crate::tokens::x16::hlg_to_linear_v4,
x8_v3 = crate::tokens::x8::hlg_to_linear_v3,
x4_neon = crate::tokens::x4::hlg_to_linear_neon,
x4_wasm128 = crate::tokens::x4::hlg_to_linear_wasm128,
doc = "Convert HLG (ARIB STD-B67) signal RGBA f32 values to linear in-place, preserving alpha.",
}
#[cfg(feature = "transfer")]
tf_rgba_slice_dispatcher! {
pub = linear_to_hlg_rgba_slice,
tier_base = linear_to_hlg_rgba_slice_tier,
tier_v4 = linear_to_hlg_rgba_slice_tier_v4,
tier_v3 = linear_to_hlg_rgba_slice_tier_v3,
tier_neon = linear_to_hlg_rgba_slice_tier_neon,
tier_wasm128 = linear_to_hlg_rgba_slice_tier_wasm128,
tier_scalar = linear_to_hlg_rgba_slice_tier_scalar,
scalar = crate::tf::linear_to_hlg,
x16_v4 = crate::tokens::x16::linear_to_hlg_v4,
x8_v3 = crate::tokens::x8::linear_to_hlg_v3,
x4_neon = crate::tokens::x4::linear_to_hlg_neon,
x4_wasm128 = crate::tokens::x4::linear_to_hlg_wasm128,
doc = "Convert linear RGBA f32 values to HLG (ARIB STD-B67) signal in-place, preserving alpha.",
}
#[cfg(test)]
#[allow(clippy::needless_range_loop)]
mod tests {
use super::*;
#[cfg(not(feature = "std"))]
use alloc::{vec, vec::Vec};
#[test]
fn test_srgb_u8_to_linear_x8() {
let input = [0u8, 64, 128, 192, 255, 32, 96, 160];
let result = crate::scalar::srgb_u8_to_linear_x8(input);
for (i, (&r, &inp)) in result.iter().zip(input.iter()).enumerate() {
let expected = crate::scalar::srgb_to_linear(inp as f32 / 255.0);
assert!(
(r - expected).abs() < 1e-4,
"srgb_u8_to_linear_x8 mismatch at {}: got {}, expected {}",
i,
r,
expected
);
}
}
#[test]
fn test_slice_roundtrip() {
let mut values: Vec<f32> = (0..=10).map(|i| i as f32 / 10.0).collect();
let original = values.clone();
srgb_to_linear_slice(&mut values);
linear_to_srgb_slice(&mut values);
for (i, (orig, conv)) in original.iter().zip(values.iter()).enumerate() {
assert!(
(orig - conv).abs() < 1e-5,
"Slice roundtrip failed at {}: {} -> {}",
i,
orig,
conv
);
}
}
#[test]
fn test_srgb_u8_to_linear_slice_basic() {
let input: Vec<u8> = (0..=255).collect();
let mut output = vec![0.0f32; 256];
srgb_u8_to_linear_slice(&input, &mut output);
for (i, &val) in output.iter().enumerate() {
let expected = crate::scalar::srgb_to_linear(i as f32 / 255.0);
assert!(
(val - expected).abs() < 1e-4,
"u8_to_linear mismatch at {}: got {}, expected {}",
i,
val,
expected
);
}
}
#[test]
fn test_linear_to_srgb_u8_slice_basic() {
let input: Vec<f32> = (0..=255).map(|i| i as f32 / 255.0).collect();
let mut output = vec![0u8; 256];
let mut direct = vec![0.0f32; 256];
for (i, &srgb) in input.iter().enumerate() {
direct[i] = crate::scalar::srgb_to_linear(srgb);
}
linear_to_srgb_u8_slice(&direct, &mut output);
for (i, &val) in output.iter().enumerate() {
let diff = (val as i32 - i as i32).unsigned_abs();
assert!(
diff <= 1,
"linear_to_srgb_u8 at {}: got {}, expected {}",
i,
val,
i
);
}
}
#[test]
fn test_gamma_slice_roundtrip() {
let mut values: Vec<f32> = (1..=100).map(|i| i as f32 / 100.0).collect();
let original = values.clone();
gamma_to_linear_slice(&mut values, 2.2);
linear_to_gamma_slice(&mut values, 2.2);
for (i, (&orig, &conv)) in original.iter().zip(values.iter()).enumerate() {
assert!(
(orig - conv).abs() < 1e-3,
"Gamma roundtrip failed at {}: {} -> {}",
i,
orig,
conv
);
}
}
#[test]
fn issue_1_srgb_to_linear_slice_modifies_alpha() {
let mut rgba = vec![
0.5, 0.5, 0.5, 1.0, 0.2, 0.4, 0.8, 0.5, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.75, ];
let alphas_before: Vec<f32> = rgba.iter().skip(3).step_by(4).copied().collect();
srgb_to_linear_slice(&mut rgba);
let alphas_after: Vec<f32> = rgba.iter().skip(3).step_by(4).copied().collect();
assert_eq!(
alphas_before[0], alphas_after[0],
"alpha=1.0 is a fixed point"
);
assert_eq!(
alphas_before[2], alphas_after[2],
"alpha=0.0 is a fixed point"
);
assert_ne!(
alphas_before[1], alphas_after[1],
"BUG(#1): alpha=0.5 is incorrectly converted by srgb_to_linear_slice"
);
assert_ne!(
alphas_before[3], alphas_after[3],
"BUG(#1): alpha=0.75 is incorrectly converted by srgb_to_linear_slice"
);
}
#[test]
fn issue_1_linear_to_srgb_slice_modifies_alpha() {
let mut rgba = vec![
0.2, 0.2, 0.2, 1.0, 0.1, 0.3, 0.5, 0.5, 0.0, 0.0, 0.0, 0.0, 0.8, 0.8, 0.8, 0.25, ];
let alphas_before: Vec<f32> = rgba.iter().skip(3).step_by(4).copied().collect();
linear_to_srgb_slice(&mut rgba);
let alphas_after: Vec<f32> = rgba.iter().skip(3).step_by(4).copied().collect();
assert_eq!(
alphas_before[2], alphas_after[2],
"alpha=0.0 is a fixed point"
);
assert_ne!(
alphas_before[1], alphas_after[1],
"BUG(#1): alpha=0.5 is incorrectly converted by linear_to_srgb_slice"
);
assert_ne!(
alphas_before[3], alphas_after[3],
"BUG(#1): alpha=0.25 is incorrectly converted by linear_to_srgb_slice"
);
}
#[test]
fn issue_1_srgb_u8_to_linear_converts_all_channels() {
let input: Vec<u8> = vec![
128, 128, 128, 255, 64, 128, 192, 128, ];
let mut output = vec![0.0f32; 8];
srgb_u8_to_linear_slice(&input, &mut output);
assert_eq!(output[3], 1.0, "alpha=255/255 should map to 1.0");
let alpha_128_linear = output[7];
let expected_passthrough = 128.0 / 255.0;
assert!(
(alpha_128_linear - expected_passthrough).abs() > 0.01,
"BUG(#1): alpha=128 is sRGB-decoded ({}) instead of passed through ({})",
alpha_128_linear,
expected_passthrough
);
}
#[test]
fn rgba_srgb_to_linear_f32_preserves_alpha() {
let mut rgba = vec![
0.5, 0.5, 0.5, 1.0, 0.2, 0.4, 0.8, 0.5, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.75, ];
let alphas_before: Vec<f32> = rgba.iter().skip(3).step_by(4).copied().collect();
srgb_to_linear_rgba_slice(&mut rgba);
let alphas_after: Vec<f32> = rgba.iter().skip(3).step_by(4).copied().collect();
assert_eq!(alphas_before, alphas_after, "all alphas must be preserved");
assert_ne!(rgba[0], 0.5, "RGB should be converted");
}
#[test]
fn rgba_linear_to_srgb_f32_preserves_alpha() {
let mut rgba = vec![
0.2, 0.2, 0.2, 1.0, 0.1, 0.3, 0.5, 0.5, 0.0, 0.0, 0.0, 0.0, 0.8, 0.8, 0.8, 0.25,
];
let alphas_before: Vec<f32> = rgba.iter().skip(3).step_by(4).copied().collect();
linear_to_srgb_rgba_slice(&mut rgba);
let alphas_after: Vec<f32> = rgba.iter().skip(3).step_by(4).copied().collect();
assert_eq!(alphas_before, alphas_after, "all alphas must be preserved");
}
#[test]
fn rgba_srgb_u8_to_linear_preserves_alpha() {
let input: Vec<u8> = vec![
128, 128, 128, 255, 64, 128, 192, 128, 0, 0, 0, 0, 255, 255, 255, 191, ];
let mut output = vec![0.0f32; 16];
srgb_u8_to_linear_rgba_slice(&input, &mut output);
assert_eq!(output[3], 1.0);
assert!((output[7] - 128.0 / 255.0).abs() < 1e-6);
assert_eq!(output[11], 0.0);
assert!((output[15] - 191.0 / 255.0).abs() < 1e-6);
let srgb_decoded_128 = crate::scalar::srgb_u8_to_linear(128);
assert_eq!(output[5], srgb_decoded_128, "RGB should use sRGB LUT");
assert_ne!(output[7], srgb_decoded_128, "alpha should NOT use sRGB LUT");
}
#[test]
fn rgba_linear_to_srgb_u8_preserves_alpha() {
let linear: Vec<f32> = vec![0.2158, 0.2158, 0.2158, 1.0, 0.0, 0.0, 0.0, 0.5];
let mut output = vec![0u8; 8];
linear_to_srgb_u8_rgba_slice(&linear, &mut output);
assert_eq!(output[3], 255);
assert_eq!(output[7], 128);
assert!((output[0] as i32 - 128).unsigned_abs() <= 1);
}
#[test]
fn rgba_srgb_u16_to_linear_preserves_alpha() {
let input: Vec<u16> = vec![
32768, 32768, 32768, 65535, 16384, 32768, 49152, 32768, ];
let mut output = vec![0.0f32; 8];
srgb_u16_to_linear_rgba_slice(&input, &mut output);
assert_eq!(output[3], 1.0);
assert!((output[7] - 32768.0 / 65535.0).abs() < 1e-6);
assert_ne!(output[0], 32768.0 / 65535.0, "RGB must be sRGB-decoded");
}
#[test]
fn rgba_linear_to_srgb_u16_preserves_alpha() {
let linear: Vec<f32> = vec![0.5, 0.5, 0.5, 1.0, 0.0, 0.0, 0.0, 0.5];
let mut output = vec![0u16; 8];
linear_to_srgb_u16_rgba_slice(&linear, &mut output);
assert_eq!(output[3], 65535);
assert_eq!(output[7], 32768);
}
#[test]
fn rgba_f32_roundtrip_preserves_all() {
let original = vec![
0.5f32, 0.3, 0.8, 0.42, 0.1, 0.9, 0.0, 1.0, 1.0, 0.0, 0.5, 0.0,
];
let mut rgba = original.clone();
srgb_to_linear_rgba_slice(&mut rgba);
linear_to_srgb_rgba_slice(&mut rgba);
for (i, (&orig, &conv)) in original.iter().zip(rgba.iter()).enumerate() {
if i % 4 == 3 {
assert_eq!(orig, conv, "alpha at pixel {} must be exact", i / 4);
} else {
assert!(
(orig - conv).abs() < 1e-5,
"RGB roundtrip at {}: {} -> {}",
i,
orig,
conv
);
}
}
}
fn make_rgba_srgb(num_pixels: usize) -> Vec<f32> {
(0..num_pixels * 4)
.map(|i| {
if i % 4 == 3 {
0.3 + (i as f32 / 400.0) } else {
(i % 256) as f32 / 255.0
}
})
.collect()
}
#[test]
fn rgba_f32_s2l_various_pixel_counts() {
for num_pixels in [1, 2, 3, 4, 5, 7, 8, 9, 15, 16, 17, 31, 32, 33] {
let data = make_rgba_srgb(num_pixels);
let alphas_before: Vec<f32> = data.iter().skip(3).step_by(4).copied().collect();
let mut rgba = data.clone();
srgb_to_linear_rgba_slice(&mut rgba);
let alphas_after: Vec<f32> = rgba.iter().skip(3).step_by(4).copied().collect();
assert_eq!(
alphas_before, alphas_after,
"alpha mismatch at {num_pixels} pixels"
);
for px in 0..num_pixels {
let srgb_r = data[px * 4];
let linear_r = rgba[px * 4];
if srgb_r > 0.04045 && srgb_r < 1.0 {
assert_ne!(
srgb_r, linear_r,
"RGB should change at pixel {px}/{num_pixels}"
);
}
}
}
}
#[test]
fn rgba_f32_l2s_various_pixel_counts() {
for num_pixels in [1, 2, 3, 4, 5, 7, 8, 9, 15, 16, 17, 31, 32, 33] {
let data = make_rgba_srgb(num_pixels);
let alphas_before: Vec<f32> = data.iter().skip(3).step_by(4).copied().collect();
let mut rgba = data.clone();
linear_to_srgb_rgba_slice(&mut rgba);
let alphas_after: Vec<f32> = rgba.iter().skip(3).step_by(4).copied().collect();
assert_eq!(
alphas_before, alphas_after,
"alpha mismatch at {num_pixels} pixels"
);
}
}
#[test]
fn rgba_rgb_channels_match_plain_s2l() {
let srgb_values = make_rgba_srgb(20);
let mut plain = srgb_values.clone();
srgb_to_linear_slice(&mut plain);
let mut rgba = srgb_values.clone();
srgb_to_linear_rgba_slice(&mut rgba);
for px in 0..20 {
for ch in 0..3 {
let idx = px * 4 + ch;
assert_eq!(
plain[idx], rgba[idx],
"RGB mismatch at pixel {px} channel {ch}"
);
}
}
}
#[test]
fn rgba_rgb_channels_match_plain_l2s() {
let linear_values = make_rgba_srgb(20);
let mut plain = linear_values.clone();
linear_to_srgb_slice(&mut plain);
let mut rgba = linear_values.clone();
linear_to_srgb_rgba_slice(&mut rgba);
for px in 0..20 {
for ch in 0..3 {
let idx = px * 4 + ch;
assert_eq!(
plain[idx], rgba[idx],
"RGB mismatch at pixel {px} channel {ch}"
);
}
}
}
#[test]
fn rgba_f32_empty_slice() {
let mut empty: Vec<f32> = vec![];
srgb_to_linear_rgba_slice(&mut empty);
linear_to_srgb_rgba_slice(&mut empty);
assert!(empty.is_empty());
}
#[test]
fn rgba_u8_empty_slice() {
let input: Vec<u8> = vec![];
let mut output: Vec<f32> = vec![];
srgb_u8_to_linear_rgba_slice(&input, &mut output);
assert!(output.is_empty());
}
#[test]
fn rgba_f32_trailing_elements_ignored() {
let mut data = vec![0.5, 0.5, 0.5, 0.42, 0.99];
let trailing_before = data[4];
srgb_to_linear_rgba_slice(&mut data);
assert_eq!(data[3], 0.42);
assert_eq!(data[4], trailing_before);
}
#[test]
fn rgba_u8_trailing_elements_ignored() {
let input = vec![128u8, 128, 128, 200, 99, 99];
let mut output = vec![0.0f32; 6];
srgb_u8_to_linear_rgba_slice(&input, &mut output);
assert_eq!(output[3], 200.0 / 255.0);
assert_eq!(output[4], 0.0);
assert_eq!(output[5], 0.0);
}
#[test]
fn rgba_u8_to_linear_batch_boundaries() {
for num_pixels in [1, 2, 3, 4, 5, 8, 9, 16, 17] {
let input: Vec<u8> = (0..num_pixels * 4)
.map(|i| {
if i % 4 == 3 {
((i / 4) * 30 + 10) as u8 } else {
128u8
}
})
.collect();
let mut output = vec![0.0f32; num_pixels * 4];
srgb_u8_to_linear_rgba_slice(&input, &mut output);
for px in 0..num_pixels {
let alpha_in = input[px * 4 + 3];
let alpha_out = output[px * 4 + 3];
let expected = alpha_in as f32 / 255.0;
assert!(
(alpha_out - expected).abs() < 1e-6,
"alpha mismatch at pixel {px}/{num_pixels}: got {alpha_out}, expected {expected}"
);
let rgb_out = output[px * 4];
let expected_rgb = crate::scalar::srgb_u8_to_linear(128);
assert_eq!(rgb_out, expected_rgb, "RGB mismatch at pixel {px}");
}
}
}
#[test]
fn rgba_u8_roundtrip() {
let input: Vec<u8> = vec![
0, 0, 0, 0, 128, 128, 128, 128, 255, 255, 255, 255, 64, 192, 32, 200, ];
let mut linear = vec![0.0f32; 16];
srgb_u8_to_linear_rgba_slice(&input, &mut linear);
let mut output = vec![0u8; 16];
linear_to_srgb_u8_rgba_slice(&linear, &mut output);
for px in 0..4 {
assert_eq!(
input[px * 4 + 3],
output[px * 4 + 3],
"alpha roundtrip failed at pixel {px}"
);
for ch in 0..3 {
let diff = (input[px * 4 + ch] as i32 - output[px * 4 + ch] as i32).unsigned_abs();
assert!(
diff <= 1,
"RGB roundtrip at pixel {px} ch {ch}: {} -> {}",
input[px * 4 + ch],
output[px * 4 + ch]
);
}
}
}
#[test]
fn rgba_u16_roundtrip() {
let input: Vec<u16> = vec![
0, 0, 0, 0, 32768, 32768, 32768, 32768, 65535, 65535, 65535, 65535, 16384, 49152, 8192,
40000,
];
let mut linear = vec![0.0f32; 16];
srgb_u16_to_linear_rgba_slice(&input, &mut linear);
let mut output = vec![0u16; 16];
linear_to_srgb_u16_rgba_slice(&linear, &mut output);
for px in 0..4 {
assert_eq!(
input[px * 4 + 3],
output[px * 4 + 3],
"alpha must roundtrip exactly at pixel {px}"
);
}
}
#[test]
fn test_u16_slice_roundtrip() {
let input: Vec<u16> = (0..=255).map(|i| (i * 257) as u16).collect();
let mut linear = vec![0.0f32; 256];
srgb_u16_to_linear_slice(&input, &mut linear);
let mut output = vec![0u16; 256];
linear_to_srgb_u16_slice(&linear, &mut output);
let mut max_diff = 0u32;
for (i, (&inp, &out)) in input.iter().zip(output.iter()).enumerate() {
let diff = (inp as i32 - out as i32).unsigned_abs();
max_diff = max_diff.max(diff);
assert!(
diff <= 10,
"u16 roundtrip at {i}: {inp} -> {out} (diff {diff})"
);
}
assert_eq!(output[255], 65535, "u16 max must roundtrip exactly");
}
#[test]
fn rgba_f32_near_threshold() {
let mut rgba = vec![
0.04044, 0.04045, 0.04046, 0.77, 0.003130, 0.0031308, 0.003132, 0.33, ];
let alphas_before = [rgba[3], rgba[7]];
srgb_to_linear_rgba_slice(&mut rgba);
assert_eq!(rgba[3], alphas_before[0]);
assert_eq!(rgba[7], alphas_before[1]);
assert!(rgba[0] < rgba[1], "s2l should be monotonic");
assert!(rgba[1] < rgba[2], "s2l should be monotonic");
linear_to_srgb_rgba_slice(&mut rgba);
assert_eq!(rgba[3], alphas_before[0]);
assert_eq!(rgba[7], alphas_before[1]);
}
#[test]
fn rgba_f32_boundary_values() {
let mut rgba = vec![
0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, ];
srgb_to_linear_rgba_slice(&mut rgba);
assert_eq!(rgba[0], 0.0, "srgb_to_linear(0.0) must be 0.0");
assert_eq!(rgba[3], 0.0, "alpha 0.0 preserved");
assert_eq!(rgba[7], 1.0, "alpha 1.0 preserved");
let mut rgba2 = vec![0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0];
linear_to_srgb_rgba_slice(&mut rgba2);
assert_eq!(rgba2[0], 0.0, "linear_to_srgb(0.0) must be 0.0");
assert_eq!(rgba2[3], 0.0, "alpha 0.0 preserved");
assert_eq!(rgba2[7], 1.0, "alpha 1.0 preserved");
}
#[test]
fn rgba_u8_boundary_values() {
let input = vec![0u8, 0, 0, 0, 255, 255, 255, 255];
let mut output = vec![0.0f32; 8];
srgb_u8_to_linear_rgba_slice(&input, &mut output);
assert_eq!(output[0], 0.0);
assert_eq!(output[3], 0.0); assert_eq!(output[4], 1.0); assert_eq!(output[7], 1.0);
let linear_input = vec![0.0f32, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0];
let mut u8_output = vec![0u8; 8];
linear_to_srgb_u8_rgba_slice(&linear_input, &mut u8_output);
assert_eq!(u8_output[0], 0);
assert_eq!(u8_output[3], 0); assert_eq!(u8_output[4], 255);
assert_eq!(u8_output[7], 255); }
#[test]
fn rgba_u8_rgb_matches_non_rgba() {
let input: Vec<u8> = (0..40).map(|i| (i * 6 + 10) as u8).collect(); let mut rgba_out = vec![0.0f32; 40];
let mut plain_out = vec![0.0f32; 40];
srgb_u8_to_linear_rgba_slice(&input, &mut rgba_out);
srgb_u8_to_linear_slice(&input, &mut plain_out);
for px in 0..10 {
for ch in 0..3 {
let idx = px * 4 + ch;
assert_eq!(
rgba_out[idx], plain_out[idx],
"u8 RGB mismatch at pixel {px} ch {ch}"
);
}
let alpha_val = input[px * 4 + 3];
if alpha_val > 10 && alpha_val < 245 {
assert_ne!(
rgba_out[px * 4 + 3],
plain_out[px * 4 + 3],
"alpha should differ between RGBA and plain at pixel {px}"
);
}
}
}
#[test]
fn premultiply_f32_basic() {
let mut rgba = vec![
0.5, 0.5, 0.5, 1.0, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.0, ];
srgb_to_linear_premultiply_rgba_slice(&mut rgba);
let expected_full = crate::scalar::srgb_to_linear(0.5);
assert!(
(rgba[0] - expected_full).abs() < 1e-5,
"full alpha: {} vs {}",
rgba[0],
expected_full
);
assert_eq!(rgba[3], 1.0, "alpha preserved");
let expected_half = crate::scalar::srgb_to_linear(0.5) * 0.5;
assert!(
(rgba[4] - expected_half).abs() < 1e-5,
"half alpha: {} vs {}",
rgba[4],
expected_half
);
assert_eq!(rgba[7], 0.5, "alpha preserved");
assert_eq!(rgba[8], 0.0, "zero alpha: R=0");
assert_eq!(rgba[9], 0.0, "zero alpha: G=0");
assert_eq!(rgba[10], 0.0, "zero alpha: B=0");
assert_eq!(rgba[11], 0.0, "alpha preserved");
}
#[test]
fn premultiply_f32_roundtrip() {
let original = vec![
0.5f32, 0.3, 0.8, 0.75, 0.1, 0.9, 0.5, 1.0, 1.0, 0.0, 0.5, 0.25, ];
let mut rgba = original.clone();
srgb_to_linear_premultiply_rgba_slice(&mut rgba);
unpremultiply_linear_to_srgb_rgba_slice(&mut rgba);
for (i, (&orig, &conv)) in original.iter().zip(rgba.iter()).enumerate() {
if i % 4 == 3 {
assert_eq!(orig, conv, "alpha must be exact at index {i}");
} else {
assert!(
(orig - conv).abs() < 1e-4,
"RGB roundtrip at {i}: {} -> {} (diff {})",
orig,
conv,
(orig - conv).abs()
);
}
}
}
#[test]
fn premultiply_f32_zero_alpha_roundtrip() {
let mut rgba = vec![0.5, 0.8, 0.3, 0.0];
srgb_to_linear_premultiply_rgba_slice(&mut rgba);
assert_eq!(rgba, [0.0, 0.0, 0.0, 0.0], "premul with a=0 → all zero");
unpremultiply_linear_to_srgb_rgba_slice(&mut rgba);
assert_eq!(rgba, [0.0, 0.0, 0.0, 0.0], "unpremul a=0 → stays zero");
}
#[test]
fn premultiply_f32_various_pixel_counts() {
for num_pixels in [1, 2, 3, 4, 5, 7, 8, 9, 15, 16, 17, 31, 32, 33] {
let data = make_rgba_srgb(num_pixels);
let alphas: Vec<f32> = data.iter().skip(3).step_by(4).copied().collect();
let mut rgba = data.clone();
srgb_to_linear_premultiply_rgba_slice(&mut rgba);
let alphas_after: Vec<f32> = rgba.iter().skip(3).step_by(4).copied().collect();
assert_eq!(
alphas, alphas_after,
"alpha mismatch at {num_pixels} pixels"
);
for px in 0..num_pixels {
let a = alphas[px];
for ch in 0..3 {
let idx = px * 4 + ch;
let expected = crate::scalar::srgb_to_linear(data[idx]) * a;
assert!(
(rgba[idx] - expected).abs() < 1e-5,
"premul mismatch at pixel {px} ch {ch} (npx={num_pixels}): {} vs {}",
rgba[idx],
expected
);
}
}
unpremultiply_linear_to_srgb_rgba_slice(&mut rgba);
for px in 0..num_pixels {
let a = alphas[px];
if a > 0.0 {
for ch in 0..3 {
let idx = px * 4 + ch;
assert!(
(rgba[idx] - data[idx]).abs() < 1e-4,
"roundtrip at px {px} ch {ch} (npx={num_pixels}): {} vs {}",
rgba[idx],
data[idx]
);
}
}
}
}
}
#[test]
fn premultiply_rgb_matches_non_premul_at_alpha_1() {
let mut premul_data: Vec<f32> = (0..80)
.map(|i| {
if i % 4 == 3 {
1.0 } else {
(i % 256) as f32 / 255.0
}
})
.collect();
let mut plain_data = premul_data.clone();
srgb_to_linear_premultiply_rgba_slice(&mut premul_data);
srgb_to_linear_rgba_slice(&mut plain_data);
for (i, (&p, &n)) in premul_data.iter().zip(plain_data.iter()).enumerate() {
assert!(
(p - n).abs() < 1e-6,
"alpha=1 mismatch at {i}: premul={p} vs plain={n}"
);
}
}
#[test]
fn premultiply_u8_basic() {
let input = vec![
128u8, 128, 128, 255, 128, 128, 128, 128, 128, 128, 128, 0, ];
let mut output = vec![0.0f32; 12];
srgb_u8_to_linear_premultiply_rgba_slice(&input, &mut output);
let expected_128 = crate::scalar::srgb_u8_to_linear(128);
assert!(
(output[0] - expected_128).abs() < 1e-5,
"full alpha u8: {} vs {}",
output[0],
expected_128
);
assert_eq!(output[3], 1.0);
let a_half = 128.0 / 255.0;
let expected_half = expected_128 * a_half;
assert!(
(output[4] - expected_half).abs() < 1e-5,
"half alpha u8: {} vs {}",
output[4],
expected_half
);
assert!((output[7] - a_half).abs() < 1e-6);
assert_eq!(output[8], 0.0);
assert_eq!(output[9], 0.0);
assert_eq!(output[10], 0.0);
assert_eq!(output[11], 0.0);
}
#[test]
fn premultiply_u8_roundtrip() {
let input: Vec<u8> = vec![
0, 0, 0, 0, 128, 128, 128, 128, 255, 255, 255, 255, 64, 192, 32, 200, ];
let mut linear = vec![0.0f32; 16];
srgb_u8_to_linear_premultiply_rgba_slice(&input, &mut linear);
let mut output = vec![0u8; 16];
unpremultiply_linear_to_srgb_u8_rgba_slice(&linear, &mut output);
for px in 0..4 {
assert_eq!(
input[px * 4 + 3],
output[px * 4 + 3],
"alpha roundtrip at pixel {px}"
);
if input[px * 4 + 3] > 0 {
for ch in 0..3 {
let diff =
(input[px * 4 + ch] as i32 - output[px * 4 + ch] as i32).unsigned_abs();
assert!(
diff <= 1,
"u8 premul roundtrip at px {px} ch {ch}: {} -> {}",
input[px * 4 + ch],
output[px * 4 + ch]
);
}
}
}
}
#[test]
fn premultiply_f32_empty() {
let mut empty: Vec<f32> = vec![];
srgb_to_linear_premultiply_rgba_slice(&mut empty);
unpremultiply_linear_to_srgb_rgba_slice(&mut empty);
}
#[test]
fn premultiply_u8_empty() {
let input: Vec<u8> = vec![];
let mut output: Vec<f32> = vec![];
srgb_u8_to_linear_premultiply_rgba_slice(&input, &mut output);
}
#[test]
fn premultiply_f32_boundary_values() {
let mut rgba = vec![
0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, ];
srgb_to_linear_premultiply_rgba_slice(&mut rgba);
assert_eq!(&rgba[0..4], &[0.0, 0.0, 0.0, 0.0]);
assert_eq!(rgba[4], 1.0);
assert_eq!(rgba[7], 1.0);
assert_eq!(&rgba[8..12], &[0.0, 0.0, 0.0, 0.0]);
}
#[test]
fn premultiply_u8_batch_boundaries() {
for num_pixels in [1, 2, 3, 4, 5, 8, 9, 16, 17] {
let input: Vec<u8> = (0..num_pixels * 4)
.map(|i| {
if i % 4 == 3 {
((i / 4) * 30 + 10) as u8
} else {
128u8
}
})
.collect();
let mut output = vec![0.0f32; num_pixels * 4];
srgb_u8_to_linear_premultiply_rgba_slice(&input, &mut output);
let expected_rgb = crate::scalar::srgb_u8_to_linear(128);
for px in 0..num_pixels {
let a = input[px * 4 + 3] as f32 / 255.0;
let expected_premul = expected_rgb * a;
assert!(
(output[px * 4] - expected_premul).abs() < 1e-5,
"u8 premul batch at px {px}/{num_pixels}: {} vs {}",
output[px * 4],
expected_premul
);
assert!(
(output[px * 4 + 3] - a).abs() < 1e-6,
"u8 premul alpha at px {px}/{num_pixels}"
);
}
}
}
#[test]
#[allow(deprecated)]
fn gamma_premultiply_roundtrip() {
for num_pixels in 1..=17 {
let mut rgba: Vec<f32> = (0..num_pixels)
.flat_map(|i| {
let v = i as f32 / num_pixels as f32;
[v, v * 0.5, v * 0.8, 0.75]
})
.collect();
let original = rgba.clone();
gamma_to_linear_premultiply_rgba_slice(&mut rgba, 2.2);
unpremultiply_linear_to_gamma_rgba_slice(&mut rgba, 2.2);
for (i, (&orig, &conv)) in original.iter().zip(rgba.iter()).enumerate() {
if i % 4 == 3 {
assert_eq!(orig, conv, "alpha changed at {i}/{num_pixels}");
} else {
assert!(
(orig - conv).abs() < 2e-3,
"gamma premul roundtrip at {i}/{num_pixels}: {orig} -> {conv}"
);
}
}
}
}
#[test]
#[allow(deprecated)]
fn gamma_premultiply_zero_alpha() {
let mut rgba = vec![0.5f32, 0.5, 0.5, 0.0, 0.8, 0.8, 0.8, 1.0];
gamma_to_linear_premultiply_rgba_slice(&mut rgba, 2.2);
assert_eq!(rgba[0], 0.0);
assert_eq!(rgba[1], 0.0);
assert_eq!(rgba[2], 0.0);
assert_eq!(rgba[3], 0.0);
assert!(rgba[4] > 0.0);
assert_eq!(rgba[7], 1.0);
unpremultiply_linear_to_gamma_rgba_slice(&mut rgba, 2.2);
assert_eq!(rgba[0], 0.0);
assert_eq!(rgba[3], 0.0);
assert!((rgba[4] - 0.8).abs() < 2e-3);
}
#[test]
fn gamma_rgba_slice_basic() {
let mut values: Vec<f32> = (0..100).map(|i| i as f32 / 99.0).collect();
let original = values.clone();
gamma_to_linear_slice(&mut values, 1.8);
linear_to_gamma_slice(&mut values, 1.8);
for (i, (&orig, &conv)) in original.iter().zip(values.iter()).enumerate() {
assert!(
(orig - conv).abs() < 1e-3,
"gamma 1.8 roundtrip at {i}: {orig} -> {conv}"
);
}
}
const TEST_LENGTHS: &[usize] = &[1, 3, 7, 8, 9, 15, 16, 17, 31, 32, 33, 100];
fn make_srgb_values(n: usize) -> Vec<f32> {
(0..n).map(|i| (i % 256) as f32 / 255.0).collect()
}
fn make_linear_values(n: usize) -> Vec<f32> {
(0..n).map(|i| (i % 256) as f32 / 255.0).collect()
}
fn make_u8_values(n: usize) -> Vec<u8> {
(0..n).map(|i| (i % 256) as u8).collect()
}
fn make_u16_values(n: usize) -> Vec<u16> {
(0..n).map(|i| ((i % 256) * 257) as u16).collect()
}
#[test]
fn length_srgb_to_linear_slice() {
for &n in TEST_LENGTHS {
let mut values = make_srgb_values(n);
let original = values.clone();
srgb_to_linear_slice(&mut values);
for (i, (&s, &l)) in original.iter().zip(values.iter()).enumerate() {
let expected = crate::scalar::srgb_to_linear(s);
assert!(
(l - expected).abs() < 1e-5,
"s2l_slice n={n} i={i}: {l} vs {expected}"
);
}
}
}
#[test]
fn length_linear_to_srgb_slice() {
for &n in TEST_LENGTHS {
let mut values = make_linear_values(n);
let original = values.clone();
linear_to_srgb_slice(&mut values);
for (i, (&l, &s)) in original.iter().zip(values.iter()).enumerate() {
let expected = crate::scalar::linear_to_srgb(l);
assert!(
(s - expected).abs() < 1e-5,
"l2s_slice n={n} i={i}: {s} vs {expected}"
);
}
}
}
#[test]
fn length_srgb_u8_to_linear_slice() {
for &n in TEST_LENGTHS {
let input = make_u8_values(n);
let mut output = vec![0.0f32; n];
srgb_u8_to_linear_slice(&input, &mut output);
for (i, (&u, &l)) in input.iter().zip(output.iter()).enumerate() {
let expected = crate::scalar::srgb_u8_to_linear(u);
assert!(
(l - expected).abs() < 1e-5,
"u8_s2l n={n} i={i}: {l} vs {expected}"
);
}
}
}
#[test]
fn length_linear_to_srgb_u8_slice() {
    // Quantization to u8 is allowed to differ from the scalar reference by
    // at most one code value (rounding at the SIMD/scalar boundary).
    for &n in TEST_LENGTHS {
        let input = make_linear_values(n);
        let mut output = vec![0u8; n];
        linear_to_srgb_u8_slice(&input, &mut output);
        for i in 0..n {
            let u = output[i];
            let expected = crate::scalar::linear_to_srgb_u8(input[i]);
            let diff = (i32::from(u) - i32::from(expected)).unsigned_abs();
            assert!(diff <= 1, "l2s_u8 n={n} i={i}: {u} vs {expected}");
        }
    }
}
#[test]
fn length_srgb_u16_to_linear_slice() {
    // u16 -> f32 linear conversion; looser tolerance than the u8 path.
    for &n in TEST_LENGTHS {
        let input = make_u16_values(n);
        let mut output = vec![0.0f32; n];
        srgb_u16_to_linear_slice(&input, &mut output);
        for (i, &l) in output.iter().enumerate() {
            let expected = crate::scalar::srgb_u16_to_linear(input[i]);
            assert!(
                (l - expected).abs() < 1e-4,
                "u16_s2l n={n} i={i}: {l} vs {expected}"
            );
        }
    }
}
#[test]
fn length_linear_to_srgb_u16_slice() {
    // Quantization to u16 may differ from the scalar reference by at most
    // one code value.
    for &n in TEST_LENGTHS {
        let input = make_linear_values(n);
        let mut output = vec![0u16; n];
        linear_to_srgb_u16_slice(&input, &mut output);
        for i in 0..n {
            let u = output[i];
            let expected = crate::scalar::linear_to_srgb_u16(input[i]);
            let diff = (i32::from(u) - i32::from(expected)).unsigned_abs();
            assert!(diff <= 1, "l2s_u16 n={n} i={i}: {u} vs {expected}");
        }
    }
}
#[test]
fn length_gamma_to_linear_slice() {
    // Roundtrip test: gamma 2.2 -> linear -> gamma 2.2 should reproduce the
    // input within 1e-3 at every tested slice length.
    for &n in TEST_LENGTHS {
        let original = make_srgb_values(n);
        let mut values = original.clone();
        gamma_to_linear_slice(&mut values, 2.2);
        linear_to_gamma_slice(&mut values, 2.2);
        for i in 0..n {
            let (orig, conv) = (original[i], values[i]);
            assert!(
                (orig - conv).abs() < 1e-3,
                "gamma roundtrip n={n} i={i}: {orig} vs {conv}"
            );
        }
    }
}
#[test]
fn length_u8_rgba_roundtrip() {
    // sRGB u8 -> linear f32 -> sRGB u8 roundtrip over interleaved RGBA:
    // alpha must roundtrip exactly, each RGB channel within 1 code value.
    for &num_pixels in TEST_LENGTHS {
        let n = num_pixels * 4;
        let input: Vec<u8> = (0..n).map(|i| (i % 256) as u8).collect();
        let mut linear = vec![0.0f32; n];
        srgb_u8_to_linear_rgba_slice(&input, &mut linear);
        let mut output = vec![0u8; n];
        linear_to_srgb_u8_rgba_slice(&linear, &mut output);
        for px in 0..num_pixels {
            // Alpha is not color-managed, so it must survive bit-exactly.
            assert_eq!(
                input[px * 4 + 3],
                output[px * 4 + 3],
                "u8 RGBA alpha roundtrip at px {px}/{num_pixels}"
            );
            for ch in 0..3 {
                let diff =
                    (input[px * 4 + ch] as i32 - output[px * 4 + ch] as i32).unsigned_abs();
                // Fixed message: previously printed "px {px} ch {ch}/{num_pixels}",
                // which read as "channel ch out of num_pixels". Report px/total
                // consistently with the alpha check above.
                assert!(
                    diff <= 1,
                    "u8 RGBA RGB roundtrip at px {px}/{num_pixels} ch {ch}: {} vs {}",
                    input[px * 4 + ch],
                    output[px * 4 + ch]
                );
            }
        }
    }
}
#[test]
fn length_u16_rgba_roundtrip() {
    // u16 RGBA: the alpha channel must be rescaled to f32 by 1/65535 on the
    // way in and roundtrip bit-exactly on the way back out.
    // NOTE(review): unlike the u8 variant, the RGB channels are not re-checked
    // after the roundtrip here — confirm whether that is intentional.
    for &num_pixels in TEST_LENGTHS {
        let n = num_pixels * 4;
        let input = make_u16_values(n);
        let mut linear = vec![0.0f32; n];
        srgb_u16_to_linear_rgba_slice(&input, &mut linear);
        let mut output = vec![0u16; n];
        linear_to_srgb_u16_rgba_slice(&linear, &mut output);
        for px in 0..num_pixels {
            let a_idx = px * 4 + 3;
            let expected_a = input[a_idx] as f32 / 65535.0;
            assert!(
                (linear[a_idx] - expected_a).abs() < 1e-5,
                "u16 RGBA alpha at px {px}/{num_pixels}"
            );
            assert_eq!(
                input[a_idx],
                output[a_idx],
                "u16 RGBA alpha roundtrip at px {px}/{num_pixels}"
            );
        }
    }
}
#[test]
fn length_premultiply_u8_roundtrip() {
    // Premultiply on the way to linear, unpremultiply on the way back:
    // alpha must roundtrip exactly; RGB within 1 code value whenever
    // alpha > 0 (at alpha == 0 the color channels are unrecoverable).
    for &num_pixels in TEST_LENGTHS {
        let n = num_pixels * 4;
        // Constant RGB of 128 with a varying, always-nonzero alpha ramp
        // (minimum 50) so unpremultiplication stays well-conditioned.
        let input: Vec<u8> = (0..n)
            .map(|i| {
                if i % 4 == 3 {
                    ((i / 4) * 15 + 50).min(255) as u8
                } else {
                    128u8
                }
            })
            .collect();
        let mut linear = vec![0.0f32; n];
        srgb_u8_to_linear_premultiply_rgba_slice(&input, &mut linear);
        let mut output = vec![0u8; n];
        unpremultiply_linear_to_srgb_u8_rgba_slice(&linear, &mut output);
        for px in 0..num_pixels {
            assert_eq!(
                input[px * 4 + 3],
                output[px * 4 + 3],
                "u8 premul alpha roundtrip at px {px}/{num_pixels}"
            );
            if input[px * 4 + 3] > 0 {
                for ch in 0..3 {
                    let diff =
                        (input[px * 4 + ch] as i32 - output[px * 4 + ch] as i32).unsigned_abs();
                    // Fixed message: previously printed "px {px} ch {ch}/{num_pixels}",
                    // which read as "channel ch out of num_pixels". Report px/total
                    // consistently with the alpha check above.
                    assert!(
                        diff <= 1,
                        "u8 premul RGB roundtrip at px {px}/{num_pixels} ch {ch}: {} vs {}",
                        input[px * 4 + ch],
                        output[px * 4 + ch]
                    );
                }
            }
        }
    }
}
#[test]
fn simd_power_result_clamped_near_one() {
    // Inputs just below (and exactly at) 1.0 stress the output clamp of the
    // rational-polynomial path: results must stay inside [0, 1] and agree
    // with the scalar reference to 1e-6.
    let mut near_one = Vec::with_capacity(1001);
    for i in 0..1000 {
        near_one.push(1.0 - (i as f32) * 1e-7);
    }
    near_one.push(1.0);

    let mut simd_s2l = near_one.clone();
    srgb_to_linear_slice(&mut simd_s2l);
    let mut simd_l2s = near_one.clone();
    linear_to_srgb_slice(&mut simd_l2s);

    for i in 0..near_one.len() {
        let v = simd_s2l[i];
        assert!(
            v <= 1.0,
            "srgb_to_linear overshoot at index {i}: input={}, output={} (bits: {:08x})",
            near_one[i],
            v,
            v.to_bits()
        );
        assert!(v >= 0.0, "srgb_to_linear undershoot at index {i}");

        let v = simd_l2s[i];
        assert!(
            v <= 1.0,
            "linear_to_srgb overshoot at index {i}: input={}, output={} (bits: {:08x})",
            near_one[i],
            v,
            v.to_bits()
        );
        assert!(v >= 0.0, "linear_to_srgb undershoot at index {i}");

        let input = near_one[i];
        let scalar_s2l = crate::scalar::srgb_to_linear(input);
        let scalar_l2s = crate::scalar::linear_to_srgb(input);
        assert!(
            (simd_s2l[i] - scalar_s2l).abs() <= 1e-6,
            "srgb_to_linear SIMD/scalar mismatch at {i}: input={input}, simd={}, scalar={}",
            simd_s2l[i],
            scalar_s2l
        );
        assert!(
            (simd_l2s[i] - scalar_l2s).abs() <= 1e-6,
            "linear_to_srgb SIMD/scalar mismatch at {i}: input={input}, simd={}, scalar={}",
            simd_l2s[i],
            scalar_l2s
        );
    }
}
#[cfg(feature = "transfer")]
mod tf_slice_parity {
    use super::super::*;

    /// Dense sweep of [0, 1] (1025 steps) plus a few exact landmark values.
    fn sweep() -> Vec<f32> {
        let mut values: Vec<f32> = (0..=1024).map(|i| i as f32 / 1024.0).collect();
        values.extend([0.0, 0.25, 0.5, 0.75, 1.0]);
        values
    }

    /// Runs `slice_fn` over the sweep and checks every element against
    /// `scalar_fn` within `tol`.
    fn check<F, G>(name: &str, slice_fn: F, scalar_fn: G, tol: f32)
    where
        F: Fn(&mut [f32]),
        G: Fn(f32) -> f32,
    {
        let input = sweep();
        let mut simd = input.clone();
        slice_fn(&mut simd);
        for (i, (&v, &got)) in input.iter().zip(simd.iter()).enumerate() {
            let expect = scalar_fn(v);
            assert!(
                (got - expect).abs() <= tol,
                "{name} mismatch at {i} (input={v}): simd={got}, scalar={expect}"
            );
        }
    }

    #[test]
    fn bt709_to_linear_slice_matches_scalar() {
        check("bt709_to_linear_slice", bt709_to_linear_slice, crate::tf::bt709_to_linear, 1e-5);
    }

    #[test]
    fn linear_to_bt709_slice_matches_scalar() {
        check("linear_to_bt709_slice", linear_to_bt709_slice, crate::tf::linear_to_bt709, 1e-4);
    }

    #[test]
    fn pq_to_linear_slice_matches_scalar() {
        check("pq_to_linear_slice", pq_to_linear_slice, crate::tf::pq_to_linear, 1e-5);
    }

    #[test]
    fn linear_to_pq_slice_matches_scalar() {
        check("linear_to_pq_slice", linear_to_pq_slice, crate::tf::linear_to_pq, 1e-5);
    }

    #[test]
    fn hlg_to_linear_slice_matches_scalar() {
        check("hlg_to_linear_slice", hlg_to_linear_slice, crate::tf::hlg_to_linear, 1e-4);
    }

    #[test]
    fn linear_to_hlg_slice_matches_scalar() {
        check("linear_to_hlg_slice", linear_to_hlg_slice, crate::tf::linear_to_hlg, 1e-4);
    }

    /// Interleaved RGBA sweep. Alpha is deliberately out of [0, 1]
    /// (negative) so any transform applied to it is detectable.
    fn rgba_sweep() -> Vec<f32> {
        (0..257)
            .flat_map(|i| {
                let t = i as f32 / 256.0;
                [t, (t * 0.5).min(1.0), (1.0 - t).max(0.0), -0.125 - t * 0.3]
            })
            .collect()
    }

    /// Runs `rgba_fn` over the RGBA sweep; checks RGB channels against
    /// `scalar_fn` within `tol` and requires alpha to pass through bit-exact.
    fn check_rgba<F, G>(name: &str, rgba_fn: F, scalar_fn: G, tol: f32)
    where
        F: Fn(&mut [f32]),
        G: Fn(f32) -> f32,
    {
        let input = rgba_sweep();
        let mut out = input.clone();
        rgba_fn(&mut out);
        for px_idx in 0..input.len() / 4 {
            let in_px = &input[px_idx * 4..px_idx * 4 + 4];
            let out_px = &out[px_idx * 4..px_idx * 4 + 4];
            for ch in 0..3 {
                let expect = scalar_fn(in_px[ch]);
                assert!(
                    (out_px[ch] - expect).abs() <= tol,
                    "{name} RGB mismatch at px {px_idx} ch {ch}: input={}, simd={}, scalar={}",
                    in_px[ch],
                    out_px[ch],
                    expect
                );
            }
            assert_eq!(
                out_px[3].to_bits(),
                in_px[3].to_bits(),
                "{name} alpha clobbered at px {px_idx}: was {}, now {}",
                in_px[3],
                out_px[3]
            );
        }
    }

    #[test]
    fn bt709_to_linear_rgba_slice_matches_scalar_and_preserves_alpha() {
        check_rgba(
            "bt709_to_linear_rgba_slice",
            bt709_to_linear_rgba_slice,
            crate::tf::bt709_to_linear,
            1e-5,
        );
    }

    #[test]
    fn linear_to_bt709_rgba_slice_matches_scalar_and_preserves_alpha() {
        check_rgba(
            "linear_to_bt709_rgba_slice",
            linear_to_bt709_rgba_slice,
            crate::tf::linear_to_bt709,
            1e-4,
        );
    }

    #[test]
    fn pq_to_linear_rgba_slice_matches_scalar_and_preserves_alpha() {
        check_rgba("pq_to_linear_rgba_slice", pq_to_linear_rgba_slice, crate::tf::pq_to_linear, 1e-5);
    }

    #[test]
    fn linear_to_pq_rgba_slice_matches_scalar_and_preserves_alpha() {
        check_rgba("linear_to_pq_rgba_slice", linear_to_pq_rgba_slice, crate::tf::linear_to_pq, 1e-5);
    }

    #[test]
    fn hlg_to_linear_rgba_slice_matches_scalar_and_preserves_alpha() {
        check_rgba("hlg_to_linear_rgba_slice", hlg_to_linear_rgba_slice, crate::tf::hlg_to_linear, 1e-4);
    }

    #[test]
    fn linear_to_hlg_rgba_slice_matches_scalar_and_preserves_alpha() {
        check_rgba("linear_to_hlg_rgba_slice", linear_to_hlg_rgba_slice, crate::tf::linear_to_hlg, 1e-4);
    }
}
}