#[cfg(not(feature = "std"))]
use alloc::{vec, vec::Vec};
#[cfg(feature = "std")]
use core::cell::RefCell;
use crate::color;
use crate::fastmath;
use crate::simd;
#[cfg(feature = "std")]
fn unpremultiply_to_scratch(src: &[f32], f: impl FnOnce(&[f32])) {
    // Copy `src` into a per-thread scratch buffer, undo premultiplied alpha
    // there, and hand the result to `f`. The thread-local keeps its capacity
    // across calls, so repeated row conversions do not reallocate.
    thread_local! {
        static SCRATCH: RefCell<Vec<f32>> = const { RefCell::new(Vec::new()) };
    }
    SCRATCH.with(|cell| {
        let mut buf = cell.borrow_mut();
        buf.clear();
        buf.extend_from_slice(src);
        simd::unpremultiply_alpha_row(buf.as_mut_slice());
        f(buf.as_slice());
    });
}
#[cfg(not(feature = "std"))]
fn unpremultiply_to_scratch(src: &[f32], f: impl FnOnce(&[f32])) {
    // No thread-local storage without `std`: fall back to a fresh allocation,
    // unpremultiply the copy, and pass it to `f`.
    let mut copy = src.to_vec();
    simd::unpremultiply_alpha_row(copy.as_mut_slice());
    f(copy.as_slice());
}
/// A transfer function (scalar decode/encode pair) plus batched row
/// conversions between encoded pixel data and linear working space.
///
/// Row methods convert whole interleaved rows. When `has_alpha` is true the
/// last of `channels` is treated as alpha and (per the impls in this file)
/// is rescaled but never pushed through the curve. `premul`/`unpremul`
/// additionally multiply/divide color channels by alpha while linear.
pub trait TransferCurve: Send + Sync + 'static {
/// Optional precomputed lookup tables, built once via [`TransferCurve::build_luts`]
/// and threaded back into the row conversions.
type Luts: Send + Sync;
/// Decode a single encoded value to linear.
fn to_linear(&self, encoded: f32) -> f32;
/// Encode a single linear value.
#[allow(clippy::wrong_self_convention)]
fn from_linear(&self, linear: f32) -> f32;
/// True when decode/encode are no-ops (callers may skip conversion).
fn is_identity(&self) -> bool {
false
}
/// Build any lookup tables the batched conversions need.
fn build_luts(&self) -> Self::Luts;
/// Decode a u8 row into linear f32; optionally premultiply alpha afterwards.
fn u8_to_linear_f32(
&self,
src: &[u8],
dst: &mut [f32],
luts: &Self::Luts,
channels: usize,
has_alpha: bool,
premul: bool,
);
/// Encode a linear f32 row into u8; optionally unpremultiply alpha first.
fn linear_f32_to_u8(
&self,
src: &[f32],
dst: &mut [u8],
luts: &Self::Luts,
channels: usize,
has_alpha: bool,
unpremul: bool,
);
/// Decode a u16 row into linear f32; optionally premultiply alpha afterwards.
fn u16_to_linear_f32(
&self,
src: &[u16],
dst: &mut [f32],
luts: &Self::Luts,
channels: usize,
has_alpha: bool,
premul: bool,
);
/// Encode a linear f32 row into u16; optionally unpremultiply alpha first.
fn linear_f32_to_u16(
&self,
src: &[f32],
dst: &mut [u16],
luts: &Self::Luts,
channels: usize,
has_alpha: bool,
unpremul: bool,
);
/// Decode a u8 row into 12-bit fixed point linear (0..=4095 stored in i16).
fn u8_to_linear_i12(&self, src: &[u8], dst: &mut [i16], luts: &Self::Luts);
/// Encode a 12-bit fixed point linear row back to u8 (input clamped to 0..=4095).
fn linear_i12_to_u8(&self, src: &[i16], dst: &mut [u8], luts: &Self::Luts);
/// Decode an f32 row in place; optionally premultiply alpha afterwards.
fn f32_to_linear_inplace(
&self,
row: &mut [f32],
channels: usize,
has_alpha: bool,
premul: bool,
);
/// Encode an f32 row in place; optionally unpremultiply alpha first (while
/// the data is still linear).
fn linear_to_f32_inplace(
&self,
row: &mut [f32],
channels: usize,
has_alpha: bool,
unpremul: bool,
);
}
/// Identity transfer curve: samples are already linear, so decode/encode only
/// rescale between integer and float representations.
#[derive(Debug, Clone, Copy, Default)]
pub struct NoTransfer;
/// Identity curve: no gamma to apply, so every method is a pure rescale plus
/// the optional alpha (un)premultiply step.
impl TransferCurve for NoTransfer {
    type Luts = ();
    #[inline]
    fn to_linear(&self, v: f32) -> f32 {
        v
    }
    #[inline]
    fn from_linear(&self, v: f32) -> f32 {
        v
    }
    #[inline]
    fn is_identity(&self) -> bool {
        true
    }
    #[inline]
    fn build_luts(&self) -> Self::Luts {}
    fn u8_to_linear_f32(
        &self,
        src: &[u8],
        dst: &mut [f32],
        _luts: &(),
        _channels: usize,
        _has_alpha: bool,
        premul: bool,
    ) {
        // No curve to undo: widen to f32 and optionally premultiply alpha.
        simd::u8_to_f32_row(src, dst);
        if premul {
            simd::premultiply_alpha_row(dst);
        }
    }
    fn linear_f32_to_u8(
        &self,
        src: &[f32],
        dst: &mut [u8],
        _luts: &(),
        _channels: usize,
        _has_alpha: bool,
        unpremul: bool,
    ) {
        if unpremul {
            // Unpremultiply in a scratch copy, then narrow that copy to u8.
            unpremultiply_to_scratch(src, |row| simd::f32_to_u8_row(row, dst));
        } else {
            simd::f32_to_u8_row(src, dst);
        }
    }
    fn u16_to_linear_f32(
        &self,
        src: &[u16],
        dst: &mut [f32],
        _luts: &(),
        _channels: usize,
        _has_alpha: bool,
        premul: bool,
    ) {
        // Normalize every sample to [0, 1]; alpha needs no special casing
        // because color and alpha use the same rescale here.
        src.iter()
            .zip(dst.iter_mut())
            .for_each(|(&s, d)| *d = f32::from(s) / 65535.0);
        if premul {
            simd::premultiply_alpha_row(dst);
        }
    }
    fn linear_f32_to_u16(
        &self,
        src: &[f32],
        dst: &mut [u16],
        _luts: &(),
        _channels: usize,
        _has_alpha: bool,
        unpremul: bool,
    ) {
        // Round-to-nearest quantization back to the full u16 range.
        let quantize = |v: f32| (v * 65535.0 + 0.5).clamp(0.0, 65535.0) as u16;
        if unpremul {
            unpremultiply_to_scratch(src, |row| {
                for (&v, d) in row.iter().zip(dst.iter_mut()) {
                    *d = quantize(v);
                }
            });
        } else {
            for (&v, d) in src.iter().zip(dst.iter_mut()) {
                *d = quantize(v);
            }
        }
    }
    fn u8_to_linear_i12(&self, src: &[u8], dst: &mut [i16], _luts: &()) {
        // Integer rescale 0..=255 -> 0..=4095 with round-to-nearest
        // (+127 is half of the 255 divisor).
        for (&s, d) in src.iter().zip(dst.iter_mut()) {
            *d = ((u32::from(s) * 4095 + 127) / 255) as i16;
        }
    }
    fn linear_i12_to_u8(&self, src: &[i16], dst: &mut [u8], _luts: &()) {
        // Inverse rescale 0..=4095 -> 0..=255, clamping out-of-range samples
        // before the unsigned arithmetic.
        for (&s, d) in src.iter().zip(dst.iter_mut()) {
            let clamped = s.clamp(0, 4095) as u32;
            *d = ((clamped * 255 + 2047) / 4095) as u8;
        }
    }
    fn f32_to_linear_inplace(
        &self,
        row: &mut [f32],
        _channels: usize,
        _has_alpha: bool,
        premul: bool,
    ) {
        // Already linear; only the optional premultiply applies.
        if premul {
            simd::premultiply_alpha_row(row);
        }
    }
    fn linear_to_f32_inplace(
        &self,
        row: &mut [f32],
        _channels: usize,
        _has_alpha: bool,
        unpremul: bool,
    ) {
        // Already in the target encoding; only the optional unpremultiply applies.
        if unpremul {
            simd::unpremultiply_alpha_row(row);
        }
    }
}
/// sRGB transfer curve. Scalar paths use `fastmath` approximations; row paths
/// delegate to the LUT/SIMD helpers in `color`/`simd`.
#[derive(Debug, Clone, Copy, Default)]
pub struct Srgb;
// Unlike the scalar-loop impls below (Bt709/Pq/Hlg), the u8 and i12 paths
// here delegate to dedicated LUT rows in `color`, and the u16 decode uses the
// `linear_srgb` crate's fast path for RGBA.
impl TransferCurve for Srgb {
// No per-instance tables: the LUTs live inside the `color` helpers.
type Luts = ();
#[inline]
fn to_linear(&self, encoded: f32) -> f32 {
fastmath::srgb_to_linear(encoded)
}
#[inline]
fn from_linear(&self, linear: f32) -> f32 {
fastmath::srgb_from_linear(linear)
}
#[inline]
fn build_luts(&self) -> Self::Luts {
}
fn u8_to_linear_f32(
&self,
src: &[u8],
dst: &mut [f32],
_luts: &(),
channels: usize,
has_alpha: bool,
premul: bool,
) {
// The helper handles the alpha-passthrough logic itself.
color::srgb_u8_to_linear_f32(src, dst, channels, has_alpha);
if premul {
simd::premultiply_alpha_row(dst);
}
}
fn linear_f32_to_u8(
&self,
src: &[f32],
dst: &mut [u8],
_luts: &(),
channels: usize,
has_alpha: bool,
unpremul: bool,
) {
if unpremul {
// Unpremultiply into scratch first so `src` stays untouched.
unpremultiply_to_scratch(src, |s| {
color::linear_f32_to_srgb_u8(s, dst, channels, has_alpha);
});
} else {
color::linear_f32_to_srgb_u8(src, dst, channels, has_alpha);
}
}
fn u16_to_linear_f32(
&self,
src: &[u16],
dst: &mut [f32],
_luts: &(),
channels: usize,
has_alpha: bool,
premul: bool,
) {
use linear_srgb::default::{srgb_u16_to_linear, srgb_u16_to_linear_rgba_slice};
if has_alpha && channels == 4 {
// Fast path for the common interleaved-RGBA layout.
srgb_u16_to_linear_rgba_slice(src, dst);
} else if has_alpha && channels >= 2 {
for (src_px, dst_px) in src
.chunks_exact(channels)
.zip(dst.chunks_exact_mut(channels))
{
for i in 0..channels - 1 {
dst_px[i] = srgb_u16_to_linear(src_px[i]);
}
// Alpha is stored linearly: rescale only, no curve.
dst_px[channels - 1] = src_px[channels - 1] as f32 / 65535.0;
}
} else {
// No alpha: every channel goes through the curve.
for (s, d) in src.iter().zip(dst.iter_mut()) {
*d = srgb_u16_to_linear(*s);
}
}
if premul {
simd::premultiply_alpha_row(dst);
}
}
fn linear_f32_to_u16(
&self,
src: &[f32],
dst: &mut [u16],
_luts: &(),
channels: usize,
has_alpha: bool,
unpremul: bool,
) {
// +0.5 before the clamp/cast gives round-to-nearest quantization.
let encode = |src: &[f32], dst: &mut [u16]| {
if has_alpha && channels >= 2 {
for (src_px, dst_px) in src
.chunks_exact(channels)
.zip(dst.chunks_exact_mut(channels))
{
for i in 0..channels - 1 {
dst_px[i] = (self.from_linear(src_px[i]) * 65535.0 + 0.5)
.clamp(0.0, 65535.0) as u16;
}
// Alpha bypasses the curve; quantize directly.
dst_px[channels - 1] =
(src_px[channels - 1] * 65535.0 + 0.5).clamp(0.0, 65535.0) as u16;
}
} else {
for (s, d) in src.iter().zip(dst.iter_mut()) {
*d = (self.from_linear(*s) * 65535.0 + 0.5).clamp(0.0, 65535.0) as u16;
}
}
};
if unpremul {
unpremultiply_to_scratch(src, |s| encode(s, dst));
} else {
encode(src, dst);
}
}
fn u8_to_linear_i12(&self, src: &[u8], dst: &mut [i16], _luts: &()) {
color::srgb_u8_to_linear_i12_row(src, dst);
}
fn linear_i12_to_u8(&self, src: &[i16], dst: &mut [u8], _luts: &()) {
color::linear_i12_to_srgb_u8_row(src, dst);
}
fn f32_to_linear_inplace(
&self,
row: &mut [f32],
channels: usize,
has_alpha: bool,
premul: bool,
) {
simd::srgb_to_linear_row(row, channels, has_alpha);
if premul {
simd::premultiply_alpha_row(row);
}
}
fn linear_to_f32_inplace(
&self,
row: &mut [f32],
channels: usize,
has_alpha: bool,
unpremul: bool,
) {
// Unpremultiply while the data is still linear, then re-encode.
if unpremul {
simd::unpremultiply_alpha_row(row);
}
simd::srgb_from_linear_row(row, channels, has_alpha);
}
}
/// BT.709 transfer curve, evaluated per sample via `fastmath` approximations.
#[derive(Debug, Clone, Copy, Default)]
pub struct Bt709;
// Scalar-loop implementation (no LUTs); structurally identical to the Pq and
// Hlg impls below, differing only in the fastmath/simd functions called.
impl TransferCurve for Bt709 {
type Luts = ();
#[inline]
fn to_linear(&self, v: f32) -> f32 {
fastmath::bt709_to_linear(v)
}
#[inline]
fn from_linear(&self, v: f32) -> f32 {
fastmath::bt709_from_linear(v)
}
fn build_luts(&self) -> Self::Luts {}
fn u8_to_linear_f32(
&self,
src: &[u8],
dst: &mut [f32],
_luts: &(),
channels: usize,
has_alpha: bool,
premul: bool,
) {
if has_alpha && channels >= 2 {
for (src_px, dst_px) in src
.chunks_exact(channels)
.zip(dst.chunks_exact_mut(channels))
{
for i in 0..channels - 1 {
dst_px[i] = self.to_linear(src_px[i] as f32 / 255.0);
}
// Alpha (last channel) is stored linearly: rescale only, no curve.
dst_px[channels - 1] = src_px[channels - 1] as f32 / 255.0;
}
} else {
for (s, d) in src.iter().zip(dst.iter_mut()) {
*d = self.to_linear(*s as f32 / 255.0);
}
}
if premul {
simd::premultiply_alpha_row(dst);
}
}
fn linear_f32_to_u8(
&self,
src: &[f32],
dst: &mut [u8],
_luts: &(),
channels: usize,
has_alpha: bool,
unpremul: bool,
) {
// +0.5 before the clamp/cast gives round-to-nearest quantization.
let encode = |src: &[f32], dst: &mut [u8]| {
if has_alpha && channels >= 2 {
for (src_px, dst_px) in src
.chunks_exact(channels)
.zip(dst.chunks_exact_mut(channels))
{
for i in 0..channels - 1 {
dst_px[i] =
(self.from_linear(src_px[i]) * 255.0 + 0.5).clamp(0.0, 255.0) as u8;
}
dst_px[channels - 1] =
(src_px[channels - 1] * 255.0 + 0.5).clamp(0.0, 255.0) as u8;
}
} else {
for (s, d) in src.iter().zip(dst.iter_mut()) {
*d = (self.from_linear(*s) * 255.0 + 0.5).clamp(0.0, 255.0) as u8;
}
}
};
if unpremul {
unpremultiply_to_scratch(src, |s| encode(s, dst));
} else {
encode(src, dst);
}
}
fn u16_to_linear_f32(
&self,
src: &[u16],
dst: &mut [f32],
_luts: &(),
channels: usize,
has_alpha: bool,
premul: bool,
) {
if has_alpha && channels >= 2 {
for (src_px, dst_px) in src
.chunks_exact(channels)
.zip(dst.chunks_exact_mut(channels))
{
for i in 0..channels - 1 {
dst_px[i] = self.to_linear(src_px[i] as f32 / 65535.0);
}
dst_px[channels - 1] = src_px[channels - 1] as f32 / 65535.0;
}
} else {
for (s, d) in src.iter().zip(dst.iter_mut()) {
*d = self.to_linear(*s as f32 / 65535.0);
}
}
if premul {
simd::premultiply_alpha_row(dst);
}
}
fn linear_f32_to_u16(
&self,
src: &[f32],
dst: &mut [u16],
_luts: &(),
channels: usize,
has_alpha: bool,
unpremul: bool,
) {
let encode = |src: &[f32], dst: &mut [u16]| {
if has_alpha && channels >= 2 {
for (src_px, dst_px) in src
.chunks_exact(channels)
.zip(dst.chunks_exact_mut(channels))
{
for i in 0..channels - 1 {
dst_px[i] = (self.from_linear(src_px[i]) * 65535.0 + 0.5)
.clamp(0.0, 65535.0) as u16;
}
dst_px[channels - 1] =
(src_px[channels - 1] * 65535.0 + 0.5).clamp(0.0, 65535.0) as u16;
}
} else {
for (s, d) in src.iter().zip(dst.iter_mut()) {
*d = (self.from_linear(*s) * 65535.0 + 0.5).clamp(0.0, 65535.0) as u16;
}
}
};
if unpremul {
unpremultiply_to_scratch(src, |s| encode(s, dst));
} else {
encode(src, dst);
}
}
// 12-bit fixed-point paths are scalar: decode, scale to 0..=4095, round.
fn u8_to_linear_i12(&self, src: &[u8], dst: &mut [i16], _luts: &()) {
for (s, d) in src.iter().zip(dst.iter_mut()) {
let linear = self.to_linear(*s as f32 / 255.0);
*d = (linear * 4095.0 + 0.5).clamp(0.0, 4095.0) as i16;
}
}
fn linear_i12_to_u8(&self, src: &[i16], dst: &mut [u8], _luts: &()) {
for (s, d) in src.iter().zip(dst.iter_mut()) {
// Clamp first: intermediate results may fall outside 0..=4095.
let linear = (*s).clamp(0, 4095) as f32 / 4095.0;
*d = (self.from_linear(linear) * 255.0 + 0.5).clamp(0.0, 255.0) as u8;
}
}
fn f32_to_linear_inplace(
&self,
row: &mut [f32],
channels: usize,
has_alpha: bool,
premul: bool,
) {
simd::bt709_to_linear_row(row, channels, has_alpha);
if premul {
simd::premultiply_alpha_row(row);
}
}
fn linear_to_f32_inplace(
&self,
row: &mut [f32],
channels: usize,
has_alpha: bool,
unpremul: bool,
) {
// Unpremultiply while still linear, then re-apply the encoding curve.
if unpremul {
simd::unpremultiply_alpha_row(row);
}
simd::bt709_from_linear_row(row, channels, has_alpha);
}
}
/// PQ (perceptual quantizer) transfer curve, evaluated per sample via
/// `fastmath` approximations.
#[derive(Debug, Clone, Copy, Default)]
pub struct Pq;
// Scalar-loop implementation; mirrors the Bt709 impl above with PQ math.
impl TransferCurve for Pq {
type Luts = ();
#[inline]
fn to_linear(&self, v: f32) -> f32 {
fastmath::pq_to_linear(v)
}
#[inline]
fn from_linear(&self, v: f32) -> f32 {
fastmath::pq_from_linear(v)
}
fn build_luts(&self) -> Self::Luts {}
fn u8_to_linear_f32(
&self,
src: &[u8],
dst: &mut [f32],
_luts: &(),
channels: usize,
has_alpha: bool,
premul: bool,
) {
if has_alpha && channels >= 2 {
for (src_px, dst_px) in src
.chunks_exact(channels)
.zip(dst.chunks_exact_mut(channels))
{
for i in 0..channels - 1 {
dst_px[i] = self.to_linear(src_px[i] as f32 / 255.0);
}
// Alpha (last channel) is stored linearly: rescale only, no curve.
dst_px[channels - 1] = src_px[channels - 1] as f32 / 255.0;
}
} else {
for (s, d) in src.iter().zip(dst.iter_mut()) {
*d = self.to_linear(*s as f32 / 255.0);
}
}
if premul {
simd::premultiply_alpha_row(dst);
}
}
fn linear_f32_to_u8(
&self,
src: &[f32],
dst: &mut [u8],
_luts: &(),
channels: usize,
has_alpha: bool,
unpremul: bool,
) {
// +0.5 before the clamp/cast gives round-to-nearest quantization.
let encode = |src: &[f32], dst: &mut [u8]| {
if has_alpha && channels >= 2 {
for (src_px, dst_px) in src
.chunks_exact(channels)
.zip(dst.chunks_exact_mut(channels))
{
for i in 0..channels - 1 {
dst_px[i] =
(self.from_linear(src_px[i]) * 255.0 + 0.5).clamp(0.0, 255.0) as u8;
}
dst_px[channels - 1] =
(src_px[channels - 1] * 255.0 + 0.5).clamp(0.0, 255.0) as u8;
}
} else {
for (s, d) in src.iter().zip(dst.iter_mut()) {
*d = (self.from_linear(*s) * 255.0 + 0.5).clamp(0.0, 255.0) as u8;
}
}
};
if unpremul {
unpremultiply_to_scratch(src, |s| encode(s, dst));
} else {
encode(src, dst);
}
}
fn u16_to_linear_f32(
&self,
src: &[u16],
dst: &mut [f32],
_luts: &(),
channels: usize,
has_alpha: bool,
premul: bool,
) {
if has_alpha && channels >= 2 {
for (src_px, dst_px) in src
.chunks_exact(channels)
.zip(dst.chunks_exact_mut(channels))
{
for i in 0..channels - 1 {
dst_px[i] = self.to_linear(src_px[i] as f32 / 65535.0);
}
dst_px[channels - 1] = src_px[channels - 1] as f32 / 65535.0;
}
} else {
for (s, d) in src.iter().zip(dst.iter_mut()) {
*d = self.to_linear(*s as f32 / 65535.0);
}
}
if premul {
simd::premultiply_alpha_row(dst);
}
}
fn linear_f32_to_u16(
&self,
src: &[f32],
dst: &mut [u16],
_luts: &(),
channels: usize,
has_alpha: bool,
unpremul: bool,
) {
let encode = |src: &[f32], dst: &mut [u16]| {
if has_alpha && channels >= 2 {
for (src_px, dst_px) in src
.chunks_exact(channels)
.zip(dst.chunks_exact_mut(channels))
{
for i in 0..channels - 1 {
dst_px[i] = (self.from_linear(src_px[i]) * 65535.0 + 0.5)
.clamp(0.0, 65535.0) as u16;
}
dst_px[channels - 1] =
(src_px[channels - 1] * 65535.0 + 0.5).clamp(0.0, 65535.0) as u16;
}
} else {
for (s, d) in src.iter().zip(dst.iter_mut()) {
*d = (self.from_linear(*s) * 65535.0 + 0.5).clamp(0.0, 65535.0) as u16;
}
}
};
if unpremul {
unpremultiply_to_scratch(src, |s| encode(s, dst));
} else {
encode(src, dst);
}
}
// 12-bit fixed-point paths are scalar: decode, scale to 0..=4095, round.
fn u8_to_linear_i12(&self, src: &[u8], dst: &mut [i16], _luts: &()) {
for (s, d) in src.iter().zip(dst.iter_mut()) {
let linear = self.to_linear(*s as f32 / 255.0);
*d = (linear * 4095.0 + 0.5).clamp(0.0, 4095.0) as i16;
}
}
fn linear_i12_to_u8(&self, src: &[i16], dst: &mut [u8], _luts: &()) {
for (s, d) in src.iter().zip(dst.iter_mut()) {
// Clamp first: intermediate results may fall outside 0..=4095.
let linear = (*s).clamp(0, 4095) as f32 / 4095.0;
*d = (self.from_linear(linear) * 255.0 + 0.5).clamp(0.0, 255.0) as u8;
}
}
fn f32_to_linear_inplace(
&self,
row: &mut [f32],
channels: usize,
has_alpha: bool,
premul: bool,
) {
simd::pq_to_linear_row(row, channels, has_alpha);
if premul {
simd::premultiply_alpha_row(row);
}
}
fn linear_to_f32_inplace(
&self,
row: &mut [f32],
channels: usize,
has_alpha: bool,
unpremul: bool,
) {
// Unpremultiply while still linear, then re-apply the encoding curve.
if unpremul {
simd::unpremultiply_alpha_row(row);
}
simd::pq_from_linear_row(row, channels, has_alpha);
}
}
/// HLG (hybrid log-gamma) transfer curve, evaluated per sample via
/// `fastmath` approximations.
#[derive(Debug, Clone, Copy, Default)]
pub struct Hlg;
// Scalar-loop implementation; mirrors the Bt709/Pq impls above with HLG math.
impl TransferCurve for Hlg {
type Luts = ();
#[inline]
fn to_linear(&self, v: f32) -> f32 {
fastmath::hlg_to_linear(v)
}
#[inline]
fn from_linear(&self, v: f32) -> f32 {
fastmath::hlg_from_linear(v)
}
fn build_luts(&self) -> Self::Luts {}
fn u8_to_linear_f32(
&self,
src: &[u8],
dst: &mut [f32],
_luts: &(),
channels: usize,
has_alpha: bool,
premul: bool,
) {
if has_alpha && channels >= 2 {
for (src_px, dst_px) in src
.chunks_exact(channels)
.zip(dst.chunks_exact_mut(channels))
{
for i in 0..channels - 1 {
dst_px[i] = self.to_linear(src_px[i] as f32 / 255.0);
}
// Alpha (last channel) is stored linearly: rescale only, no curve.
dst_px[channels - 1] = src_px[channels - 1] as f32 / 255.0;
}
} else {
for (s, d) in src.iter().zip(dst.iter_mut()) {
*d = self.to_linear(*s as f32 / 255.0);
}
}
if premul {
simd::premultiply_alpha_row(dst);
}
}
fn linear_f32_to_u8(
&self,
src: &[f32],
dst: &mut [u8],
_luts: &(),
channels: usize,
has_alpha: bool,
unpremul: bool,
) {
// +0.5 before the clamp/cast gives round-to-nearest quantization.
let encode = |src: &[f32], dst: &mut [u8]| {
if has_alpha && channels >= 2 {
for (src_px, dst_px) in src
.chunks_exact(channels)
.zip(dst.chunks_exact_mut(channels))
{
for i in 0..channels - 1 {
dst_px[i] =
(self.from_linear(src_px[i]) * 255.0 + 0.5).clamp(0.0, 255.0) as u8;
}
dst_px[channels - 1] =
(src_px[channels - 1] * 255.0 + 0.5).clamp(0.0, 255.0) as u8;
}
} else {
for (s, d) in src.iter().zip(dst.iter_mut()) {
*d = (self.from_linear(*s) * 255.0 + 0.5).clamp(0.0, 255.0) as u8;
}
}
};
if unpremul {
unpremultiply_to_scratch(src, |s| encode(s, dst));
} else {
encode(src, dst);
}
}
fn u16_to_linear_f32(
&self,
src: &[u16],
dst: &mut [f32],
_luts: &(),
channels: usize,
has_alpha: bool,
premul: bool,
) {
if has_alpha && channels >= 2 {
for (src_px, dst_px) in src
.chunks_exact(channels)
.zip(dst.chunks_exact_mut(channels))
{
for i in 0..channels - 1 {
dst_px[i] = self.to_linear(src_px[i] as f32 / 65535.0);
}
dst_px[channels - 1] = src_px[channels - 1] as f32 / 65535.0;
}
} else {
for (s, d) in src.iter().zip(dst.iter_mut()) {
*d = self.to_linear(*s as f32 / 65535.0);
}
}
if premul {
simd::premultiply_alpha_row(dst);
}
}
fn linear_f32_to_u16(
&self,
src: &[f32],
dst: &mut [u16],
_luts: &(),
channels: usize,
has_alpha: bool,
unpremul: bool,
) {
let encode = |src: &[f32], dst: &mut [u16]| {
if has_alpha && channels >= 2 {
for (src_px, dst_px) in src
.chunks_exact(channels)
.zip(dst.chunks_exact_mut(channels))
{
for i in 0..channels - 1 {
dst_px[i] = (self.from_linear(src_px[i]) * 65535.0 + 0.5)
.clamp(0.0, 65535.0) as u16;
}
dst_px[channels - 1] =
(src_px[channels - 1] * 65535.0 + 0.5).clamp(0.0, 65535.0) as u16;
}
} else {
for (s, d) in src.iter().zip(dst.iter_mut()) {
*d = (self.from_linear(*s) * 65535.0 + 0.5).clamp(0.0, 65535.0) as u16;
}
}
};
if unpremul {
unpremultiply_to_scratch(src, |s| encode(s, dst));
} else {
encode(src, dst);
}
}
// 12-bit fixed-point paths are scalar: decode, scale to 0..=4095, round.
fn u8_to_linear_i12(&self, src: &[u8], dst: &mut [i16], _luts: &()) {
for (s, d) in src.iter().zip(dst.iter_mut()) {
let linear = self.to_linear(*s as f32 / 255.0);
*d = (linear * 4095.0 + 0.5).clamp(0.0, 4095.0) as i16;
}
}
fn linear_i12_to_u8(&self, src: &[i16], dst: &mut [u8], _luts: &()) {
for (s, d) in src.iter().zip(dst.iter_mut()) {
// Clamp first: intermediate results may fall outside 0..=4095.
let linear = (*s).clamp(0, 4095) as f32 / 4095.0;
*d = (self.from_linear(linear) * 255.0 + 0.5).clamp(0.0, 255.0) as u8;
}
}
fn f32_to_linear_inplace(
&self,
row: &mut [f32],
channels: usize,
has_alpha: bool,
premul: bool,
) {
simd::hlg_to_linear_row(row, channels, has_alpha);
if premul {
simd::premultiply_alpha_row(row);
}
}
fn linear_to_f32_inplace(
&self,
row: &mut [f32],
channels: usize,
has_alpha: bool,
unpremul: bool,
) {
// Unpremultiply while still linear, then re-apply the encoding curve.
if unpremul {
simd::unpremultiply_alpha_row(row);
}
simd::hlg_from_linear_row(row, channels, has_alpha);
}
}
#[cfg(test)]
mod tests {
    use super::*;
    #[cfg(not(feature = "std"))]
    use alloc::{vec, vec::Vec};

    // Round-trip tests allow an off-by-one because from_linear/to_linear use
    // fastmath approximations rather than exact transcendental math.

    #[test]
    fn no_transfer_roundtrip_u8() {
        let tf = NoTransfer;
        tf.build_luts();
        let src: Vec<u8> = (0..=255).collect();
        let mut f32_buf = vec![0.0f32; 256];
        let mut out = vec![0u8; 256];
        tf.u8_to_linear_f32(&src, &mut f32_buf, &(), 1, false, false);
        tf.linear_f32_to_u8(&f32_buf, &mut out, &(), 1, false, false);
        for i in 0..256 {
            assert_eq!(src[i], out[i], "NoTransfer roundtrip mismatch at {}", i);
        }
    }

    #[test]
    fn srgb_roundtrip_u8() {
        let tf = Srgb;
        tf.build_luts();
        let src: Vec<u8> = (0..=255).collect();
        let mut f32_buf = vec![0.0f32; 256];
        let mut out = vec![0u8; 256];
        tf.u8_to_linear_f32(&src, &mut f32_buf, &(), 1, false, false);
        tf.linear_f32_to_u8(&f32_buf, &mut out, &(), 1, false, false);
        for i in 0..256 {
            let diff = (src[i] as i16 - out[i] as i16).unsigned_abs();
            assert!(diff <= 1, "sRGB roundtrip off by {} at {}", diff, i);
        }
    }

    #[test]
    fn srgb_roundtrip_u16() {
        let tf = Srgb;
        tf.build_luts();
        // Sample every 257th code so the whole u16 range is covered cheaply.
        let values: Vec<u16> = (0..=65535).step_by(257).collect();
        let mut f32_buf = vec![0.0f32; values.len()];
        let mut out = vec![0u16; values.len()];
        tf.u16_to_linear_f32(&values, &mut f32_buf, &(), 1, false, false);
        tf.linear_f32_to_u16(&f32_buf, &mut out, &(), 1, false, false);
        for i in 0..values.len() {
            let diff = (values[i] as i32 - out[i] as i32).unsigned_abs();
            assert!(
                diff <= 1,
                "sRGB u16 roundtrip off by {} at value {}: {} -> {} -> {}",
                diff,
                values[i],
                values[i],
                f32_buf[i],
                out[i]
            );
        }
    }

    #[test]
    fn srgb_i12_matches_existing() {
        let tf = Srgb;
        tf.build_luts();
        let src: Vec<u8> = (0..=255).collect();
        let mut via_tf = vec![0i16; 256];
        let mut via_direct = vec![0i16; 256];
        tf.u8_to_linear_i12(&src, &mut via_tf, &());
        crate::color::srgb_u8_to_linear_i12_row(&src, &mut via_direct);
        assert_eq!(via_tf, via_direct, "TF i12 path should match direct LUT");
    }

    #[test]
    fn srgb_scalar_matches_lut() {
        let tf = Srgb;
        for i in 0..=255u8 {
            let from_scalar = tf.to_linear(i as f32 / 255.0);
            let mut f32_buf = [0.0f32];
            crate::color::srgb_u8_to_linear_f32(&[i], &mut f32_buf, 1, false);
            let diff = (from_scalar - f32_buf[0]).abs();
            assert!(
                diff < 1e-5,
                "sRGB scalar vs LUT mismatch at {}: scalar={}, lut={}",
                i,
                from_scalar,
                f32_buf[0]
            );
        }
    }

    #[test]
    fn no_transfer_identity() {
        let tf = NoTransfer;
        assert!(tf.is_identity());
        assert_eq!(tf.to_linear(0.5), 0.5);
        assert_eq!(tf.from_linear(0.5), 0.5);
    }

    #[test]
    fn srgb_not_identity() {
        let tf = Srgb;
        assert!(!tf.is_identity());
        let linear = tf.to_linear(0.5);
        assert!(
            (linear - 0.214).abs() < 0.01,
            "sRGB 0.5 → linear = {} (expected ~0.214)",
            linear
        );
    }

    #[test]
    fn srgb_u8_to_f32_with_alpha() {
        let tf = Srgb;
        tf.build_luts();
        let src = [128u8, 64, 32, 200];
        let mut dst = [0.0f32; 4];
        tf.u8_to_linear_f32(&src, &mut dst, &(), 4, true, false);
        assert!(dst[0] > 0.2 && dst[0] < 0.3, "R linear: {}", dst[0]);
        assert!((dst[3] - 200.0 / 255.0).abs() < 0.01, "A: {}", dst[3]);
    }

    #[test]
    fn srgb_u8_premul_unpremul_roundtrip() {
        let tf = Srgb;
        tf.build_luts();
        let src = [128u8, 64, 32, 200];
        let mut f32_buf = [0.0f32; 4];
        let mut out = [0u8; 4];
        tf.u8_to_linear_f32(&src, &mut f32_buf, &(), 4, true, true);
        tf.linear_f32_to_u8(&f32_buf, &mut out, &(), 4, true, true);
        for i in 0..4 {
            let diff = (src[i] as i16 - out[i] as i16).unsigned_abs();
            assert!(
                diff <= 1,
                "Premul roundtrip off by {} at channel {}: {} vs {}",
                diff,
                i,
                src[i],
                out[i]
            );
        }
    }

    #[test]
    fn no_alpha_3ch_roundtrip() {
        let tf = Srgb;
        tf.build_luts();
        // Two 3-channel pixels; without alpha every channel goes through the curve.
        let src = [128u8, 64, 32, 200, 100, 50];
        let mut f32_buf = [0.0f32; 6];
        let mut out = [0u8; 6];
        tf.u8_to_linear_f32(&src, &mut f32_buf, &(), 3, false, false);
        tf.linear_f32_to_u8(&f32_buf, &mut out, &(), 3, false, false);
        for i in 0..6 {
            let diff = (src[i] as i16 - out[i] as i16).unsigned_abs();
            assert!(
                diff <= 1,
                "3ch roundtrip off by {} at {}: {} vs {}",
                diff,
                i,
                src[i],
                out[i]
            );
        }
    }

    #[test]
    fn bt709_roundtrip_u8() {
        let tf = Bt709;
        tf.build_luts();
        let src: Vec<u8> = (0..=255).collect();
        let mut f32_buf = vec![0.0f32; 256];
        let mut out = vec![0u8; 256];
        tf.u8_to_linear_f32(&src, &mut f32_buf, &(), 1, false, false);
        tf.linear_f32_to_u8(&f32_buf, &mut out, &(), 1, false, false);
        for i in 0..256 {
            let diff = (src[i] as i16 - out[i] as i16).unsigned_abs();
            assert!(diff <= 1, "BT.709 roundtrip off by {} at {}", diff, i);
        }
    }

    #[test]
    fn bt709_monotonic() {
        let tf = Bt709;
        let mut prev = 0.0f32;
        for i in 0..=255u8 {
            let linear = tf.to_linear(i as f32 / 255.0);
            assert!(
                linear >= prev,
                "BT.709 to_linear not monotonic at {}: {} < {}",
                i,
                linear,
                prev
            );
            prev = linear;
        }
    }

    #[test]
    fn bt709_endpoints() {
        let tf = Bt709;
        assert!((tf.to_linear(0.0)).abs() < 1e-7);
        assert!((tf.to_linear(1.0) - 1.0).abs() < 1e-5);
        assert!((tf.from_linear(0.0)).abs() < 1e-7);
        assert!((tf.from_linear(1.0) - 1.0).abs() < 1e-5);
    }

    #[test]
    fn pq_roundtrip_u8() {
        let tf = Pq;
        tf.build_luts();
        let src: Vec<u8> = (0..=255).collect();
        let mut f32_buf = vec![0.0f32; 256];
        let mut out = vec![0u8; 256];
        tf.u8_to_linear_f32(&src, &mut f32_buf, &(), 1, false, false);
        tf.linear_f32_to_u8(&f32_buf, &mut out, &(), 1, false, false);
        for i in 0..256 {
            let diff = (src[i] as i16 - out[i] as i16).unsigned_abs();
            assert!(diff <= 1, "PQ roundtrip off by {} at {}", diff, i);
        }
    }

    #[test]
    fn pq_monotonic() {
        let tf = Pq;
        let mut prev = 0.0f32;
        for i in 0..=255u8 {
            let linear = tf.to_linear(i as f32 / 255.0);
            assert!(
                linear >= prev,
                "PQ to_linear not monotonic at {}: {} < {}",
                i,
                linear,
                prev
            );
            prev = linear;
        }
    }

    #[test]
    fn pq_endpoints() {
        let tf = Pq;
        assert!((tf.to_linear(0.0)).abs() < 1e-7);
        assert!((tf.to_linear(1.0) - 1.0).abs() < 1e-4);
        assert!((tf.from_linear(0.0)).abs() < 1e-7);
        assert!((tf.from_linear(1.0) - 1.0).abs() < 1e-4);
    }

    #[test]
    fn hlg_roundtrip_u8() {
        let tf = Hlg;
        tf.build_luts();
        let src: Vec<u8> = (0..=255).collect();
        let mut f32_buf = vec![0.0f32; 256];
        let mut out = vec![0u8; 256];
        tf.u8_to_linear_f32(&src, &mut f32_buf, &(), 1, false, false);
        tf.linear_f32_to_u8(&f32_buf, &mut out, &(), 1, false, false);
        for i in 0..256 {
            let diff = (src[i] as i16 - out[i] as i16).unsigned_abs();
            assert!(diff <= 1, "HLG roundtrip off by {} at {}", diff, i);
        }
    }

    #[test]
    fn hlg_monotonic() {
        let tf = Hlg;
        let mut prev = 0.0f32;
        for i in 0..=255u8 {
            let linear = tf.to_linear(i as f32 / 255.0);
            assert!(
                linear >= prev,
                "HLG to_linear not monotonic at {}: {} < {}",
                i,
                linear,
                prev
            );
            prev = linear;
        }
    }

    #[test]
    fn hlg_endpoints() {
        let tf = Hlg;
        assert!((tf.to_linear(0.0)).abs() < 1e-7);
        let at_one = tf.to_linear(1.0);
        assert!(at_one > 0.0, "HLG(1.0) should be positive: {}", at_one);
        let back = tf.from_linear(at_one);
        assert!(
            (back - 1.0).abs() < 1e-5,
            "HLG roundtrip at 1.0: {} -> {} -> {}",
            1.0,
            at_one,
            back
        );
    }

    #[test]
    fn cicp_transfer_known_codes() {
        use zenpixels::TransferFunction;
        assert_eq!(
            TransferFunction::from_cicp(1),
            Some(TransferFunction::Bt709)
        );
        assert_eq!(
            TransferFunction::from_cicp(6),
            Some(TransferFunction::Bt709)
        );
        assert_eq!(
            TransferFunction::from_cicp(8),
            Some(TransferFunction::Linear)
        );
        assert_eq!(
            TransferFunction::from_cicp(13),
            Some(TransferFunction::Srgb)
        );
        assert_eq!(TransferFunction::from_cicp(16), Some(TransferFunction::Pq));
        assert_eq!(TransferFunction::from_cicp(18), Some(TransferFunction::Hlg));
    }

    #[test]
    fn cicp_transfer_unknown_codes() {
        use zenpixels::TransferFunction;
        assert_eq!(TransferFunction::from_cicp(0), None);
        assert_eq!(TransferFunction::from_cicp(2), None);
        assert_eq!(TransferFunction::from_cicp(255), None);
    }
}