Skip to main content

zenresize/
transfer.rs

1//! Transfer function traits and implementations.
2//!
3//! A transfer function (TF) maps between a perceptual/encoded space and linear light.
4//! sRGB, PQ (HDR10), HLG, and gamma curves are all transfer functions.
5//!
6//! The [`TransferCurve`] trait provides batch methods for converting pixel rows
7//! between encoded and working space. Implementations fuse linearization with
8//! premultiply/unpremultiply for efficiency.
9//!
10//! # Standard implementations
11//!
12//! - [`NoTransfer`] — Identity (no conversion). Zero overhead.
13//! - [`Srgb`] — sRGB gamma curve. Uses compile-time LUTs for u8 and
14//!   the `linear-srgb` crate's SIMD-friendly LUTs for f32.
15//!
16//! # LUT caching
17//!
18//! Standard TFs permanently cache runtime LUTs via `OnceLock` (with the `std` feature).
19//! For `Srgb`, the u8↔i12 tables are compile-time constants — no runtime allocation.
20
21#[cfg(not(feature = "std"))]
22use alloc::{vec, vec::Vec};
23#[cfg(feature = "std")]
24use core::cell::RefCell;
25
26use crate::color;
27use crate::fastmath;
28use crate::simd;
29
30// =============================================================================
31// Scratch buffer for unpremultiply (avoids per-row heap allocation)
32// =============================================================================
33
/// Run `f` on an unpremultiplied copy of `src` without a per-call allocation.
///
/// With `std`, a thread-local scratch `Vec` is reused across calls: it grows
/// to the largest row ever seen and stays allocated for the thread's lifetime.
/// Without `std`, the fallback variant copies into a fresh `Vec` instead.
#[cfg(feature = "std")]
fn unpremultiply_to_scratch(src: &[f32], f: impl FnOnce(&[f32])) {
    thread_local! {
        static SCRATCH: RefCell<Vec<f32>> = const { RefCell::new(Vec::new()) };
    }
    SCRATCH.with(|cell| {
        let mut buf = cell.borrow_mut();
        // clear + extend keeps the existing capacity (high-water-mark reuse).
        buf.clear();
        buf.extend_from_slice(src);
        simd::unpremultiply_alpha_row(&mut buf);
        f(&buf);
    });
}
52
53#[cfg(not(feature = "std"))]
54fn unpremultiply_to_scratch(src: &[f32], f: impl FnOnce(&[f32])) {
55    let mut tmp = src.to_vec();
56    simd::unpremultiply_alpha_row(&mut tmp);
57    f(&tmp);
58}
59
60// =============================================================================
61// TransferCurve trait
62// =============================================================================
63
/// Transfer function for encoding/decoding pixel values to/from linear light.
///
/// Provides batch scanline methods for converting between encoded element types
/// and the working-space type used during filtering. The `channels` parameter
/// and `premul`/`unpremul` flags handle arbitrary layouts — RGB (3ch, no alpha),
/// RGBA (4ch, alpha last), CMYK (4ch, no alpha), Gray (1ch), etc.
///
/// # Implementing a custom TF
///
/// Implement `to_linear` and `from_linear` for the scalar curve. The batch
/// methods are required as well (none of them has a default body here); a
/// simple implementation can call the scalar curve in a loop, while optimized
/// implementations use LUT-based or SIMD-based versions.
pub trait TransferCurve: Send + Sync + 'static {
    /// Cached state (LUTs, etc.). Built once by `build_luts()`, passed to batch methods.
    ///
    /// Standard TFs can use `&'static RuntimeLuts` (permanently cached via `OnceLock`);
    /// the implementations visible in this file need no runtime tables and use `()`.
    /// Custom TFs can use owned `RuntimeLuts` or `()`.
    type Luts: Send + Sync;

    // --- Scalar (for LUT building and testing) ---

    /// Decode a value from this TF's encoded space to linear light.
    /// Both input and output are in [0, 1] for SDR TFs.
    fn to_linear(&self, encoded: f32) -> f32;

    /// Encode a linear-light value to this TF's encoded space.
    // `from_` normally means a conversion constructor; here it is the inverse
    // of `to_linear`, hence the clippy allow.
    #[allow(clippy::wrong_self_convention)]
    fn from_linear(&self, linear: f32) -> f32;

    /// Whether this TF is the identity (no conversion needed).
    /// Defaults to `false`; only identity TFs override this.
    fn is_identity(&self) -> bool {
        false
    }

    /// Build or retrieve cached LUTs.
    fn build_luts(&self) -> Self::Luts;

    // --- Batch: u8 encoded → working f32 ---

    /// Convert a row of u8 encoded pixels to premultiplied linear f32.
    ///
    /// Fuses linearize + premultiply. If `premul` is false, only linearizes.
    /// `channels` is the number of components per pixel (1, 3, or 4).
    /// Alpha (if present) is assumed to be the last channel and is NOT linearized
    /// — it's scaled linearly (v/255).
    fn u8_to_linear_f32(
        &self,
        src: &[u8],
        dst: &mut [f32],
        luts: &Self::Luts,
        channels: usize,
        has_alpha: bool,
        premul: bool,
    );

    /// Convert a row of premultiplied linear f32 to u8 encoded pixels.
    ///
    /// Fuses unpremultiply + delinearize + quantize. Clamps to [0, 255].
    fn linear_f32_to_u8(
        &self,
        src: &[f32],
        dst: &mut [u8],
        luts: &Self::Luts,
        channels: usize,
        has_alpha: bool,
        unpremul: bool,
    );

    // --- Batch: u16 encoded ↔ working f32 ---

    /// Convert a row of u16 encoded pixels to premultiplied linear f32.
    ///
    /// Values span full 0-65535 range. Alpha (if present, last channel)
    /// is scaled linearly (v/65535). Non-alpha channels go through the TF.
    fn u16_to_linear_f32(
        &self,
        src: &[u16],
        dst: &mut [f32],
        luts: &Self::Luts,
        channels: usize,
        has_alpha: bool,
        premul: bool,
    );

    /// Convert premultiplied linear f32 to u16 encoded pixels.
    /// Fuses unpremultiply + delinearize + quantize. Clamps to [0, 65535].
    fn linear_f32_to_u16(
        &self,
        src: &[f32],
        dst: &mut [u16],
        luts: &Self::Luts,
        channels: usize,
        has_alpha: bool,
        unpremul: bool,
    );

    // --- Batch: u8 encoded → i12 linear (for I16Work path) ---

    /// Convert a row of u8 encoded pixels to linear i12 (0-4095).
    /// All channels converted; no premul/unpremul (I16Work doesn't support premul).
    fn u8_to_linear_i12(&self, src: &[u8], dst: &mut [i16], luts: &Self::Luts);

    /// Convert a row of linear i12 to u8 encoded pixels.
    /// Inverse of [`Self::u8_to_linear_i12`]; input is clamped to 0-4095.
    fn linear_i12_to_u8(&self, src: &[i16], dst: &mut [u8], luts: &Self::Luts);

    // --- Batch: f32 encoded ↔ linear f32 (in-place) ---

    /// Convert f32 encoded values to premultiplied linear f32 in-place.
    fn f32_to_linear_inplace(
        &self,
        row: &mut [f32],
        channels: usize,
        has_alpha: bool,
        premul: bool,
    );

    /// Convert premultiplied linear f32 to f32 encoded in-place.
    /// Does NOT clamp — output can have values outside [0, 1] for wide gamut.
    fn linear_to_f32_inplace(
        &self,
        row: &mut [f32],
        channels: usize,
        has_alpha: bool,
        unpremul: bool,
    );
}
190
191// =============================================================================
192// NoTransfer — identity transfer function
193// =============================================================================
194
195/// Identity transfer function: no conversion.
196///
197/// Used when pixel data is already in the desired space, or when
198/// resizing should happen in the encoded domain (gamma-space resize).
199/// All batch methods are trivial copies/scales.
200#[derive(Debug, Clone, Copy, Default)]
201pub struct NoTransfer;
202
203impl TransferCurve for NoTransfer {
204    type Luts = ();
205
206    #[inline]
207    fn to_linear(&self, v: f32) -> f32 {
208        v
209    }
210
211    #[inline]
212    fn from_linear(&self, v: f32) -> f32 {
213        v
214    }
215
216    #[inline]
217    fn is_identity(&self) -> bool {
218        true
219    }
220
221    #[inline]
222    fn build_luts(&self) -> Self::Luts {}
223
224    fn u8_to_linear_f32(
225        &self,
226        src: &[u8],
227        dst: &mut [f32],
228        _luts: &(),
229        _channels: usize,
230        _has_alpha: bool,
231        premul: bool,
232    ) {
233        simd::u8_to_f32_row(src, dst);
234        if premul {
235            simd::premultiply_alpha_row(dst);
236        }
237    }
238
239    fn linear_f32_to_u8(
240        &self,
241        src: &[f32],
242        dst: &mut [u8],
243        _luts: &(),
244        _channels: usize,
245        _has_alpha: bool,
246        unpremul: bool,
247    ) {
248        if unpremul {
249            unpremultiply_to_scratch(src, |s| simd::f32_to_u8_row(s, dst));
250        } else {
251            simd::f32_to_u8_row(src, dst);
252        }
253    }
254
255    fn u16_to_linear_f32(
256        &self,
257        src: &[u16],
258        dst: &mut [f32],
259        _luts: &(),
260        _channels: usize,
261        _has_alpha: bool,
262        premul: bool,
263    ) {
264        for (s, d) in src.iter().zip(dst.iter_mut()) {
265            *d = *s as f32 / 65535.0;
266        }
267        if premul {
268            simd::premultiply_alpha_row(dst);
269        }
270    }
271
272    fn linear_f32_to_u16(
273        &self,
274        src: &[f32],
275        dst: &mut [u16],
276        _luts: &(),
277        _channels: usize,
278        _has_alpha: bool,
279        unpremul: bool,
280    ) {
281        if unpremul {
282            unpremultiply_to_scratch(src, |s| {
283                for (sv, d) in s.iter().zip(dst.iter_mut()) {
284                    *d = (*sv * 65535.0 + 0.5).clamp(0.0, 65535.0) as u16;
285                }
286            });
287        } else {
288            for (s, d) in src.iter().zip(dst.iter_mut()) {
289                *d = (*s * 65535.0 + 0.5).clamp(0.0, 65535.0) as u16;
290            }
291        }
292    }
293
294    fn u8_to_linear_i12(&self, src: &[u8], dst: &mut [i16], _luts: &()) {
295        // Identity: scale u8 (0-255) → i12 (0-4095)
296        for (s, d) in src.iter().zip(dst.iter_mut()) {
297            // Exact: (v * 4095 + 127) / 255
298            *d = ((*s as u32 * 4095 + 127) / 255) as i16;
299        }
300    }
301
302    fn linear_i12_to_u8(&self, src: &[i16], dst: &mut [u8], _luts: &()) {
303        // Identity: scale i12 (0-4095) → u8 (0-255)
304        for (s, d) in src.iter().zip(dst.iter_mut()) {
305            let clamped = (*s).clamp(0, 4095) as u32;
306            *d = ((clamped * 255 + 2047) / 4095) as u8;
307        }
308    }
309
310    fn f32_to_linear_inplace(
311        &self,
312        row: &mut [f32],
313        _channels: usize,
314        _has_alpha: bool,
315        premul: bool,
316    ) {
317        if premul {
318            simd::premultiply_alpha_row(row);
319        }
320    }
321
322    fn linear_to_f32_inplace(
323        &self,
324        row: &mut [f32],
325        _channels: usize,
326        _has_alpha: bool,
327        unpremul: bool,
328    ) {
329        if unpremul {
330            simd::unpremultiply_alpha_row(row);
331        }
332    }
333}
334
335// =============================================================================
336// Srgb — sRGB transfer function
337// =============================================================================
338
339/// sRGB transfer function.
340///
341/// Uses the `linear-srgb` crate's LUT-based conversion for u8↔f32 and
342/// compile-time constant tables for u8↔i12. The u8 path is heavily optimized
343/// with SIMD-friendly LUT lookups.
344///
345/// For u16 input, values are normalized to [0, 1] and passed through the
346/// scalar sRGB curve. This is slower than the u8 LUT path but exact.
347#[derive(Debug, Clone, Copy, Default)]
348pub struct Srgb;
349
350impl TransferCurve for Srgb {
351    type Luts = ();
352
353    #[inline]
354    fn to_linear(&self, encoded: f32) -> f32 {
355        fastmath::srgb_to_linear(encoded)
356    }
357
358    #[inline]
359    fn from_linear(&self, linear: f32) -> f32 {
360        fastmath::srgb_from_linear(linear)
361    }
362
363    #[inline]
364    fn build_luts(&self) -> Self::Luts {
365        // Srgb uses compile-time LUTs and the linear-srgb crate — no runtime LUTs needed.
366    }
367
368    fn u8_to_linear_f32(
369        &self,
370        src: &[u8],
371        dst: &mut [f32],
372        _luts: &(),
373        channels: usize,
374        has_alpha: bool,
375        premul: bool,
376    ) {
377        color::srgb_u8_to_linear_f32(src, dst, channels, has_alpha);
378        if premul {
379            simd::premultiply_alpha_row(dst);
380        }
381    }
382
383    fn linear_f32_to_u8(
384        &self,
385        src: &[f32],
386        dst: &mut [u8],
387        _luts: &(),
388        channels: usize,
389        has_alpha: bool,
390        unpremul: bool,
391    ) {
392        if unpremul {
393            unpremultiply_to_scratch(src, |s| {
394                color::linear_f32_to_srgb_u8(s, dst, channels, has_alpha);
395            });
396        } else {
397            color::linear_f32_to_srgb_u8(src, dst, channels, has_alpha);
398        }
399    }
400
401    fn u16_to_linear_f32(
402        &self,
403        src: &[u16],
404        dst: &mut [f32],
405        _luts: &(),
406        channels: usize,
407        has_alpha: bool,
408        premul: bool,
409    ) {
410        // Use linear-srgb's 65536-entry const LUT for u16 sRGB decode.
411        use linear_srgb::default::{srgb_u16_to_linear, srgb_u16_to_linear_rgba_slice};
412
413        if has_alpha && channels == 4 {
414            srgb_u16_to_linear_rgba_slice(src, dst);
415        } else if has_alpha && channels >= 2 {
416            for (src_px, dst_px) in src
417                .chunks_exact(channels)
418                .zip(dst.chunks_exact_mut(channels))
419            {
420                for i in 0..channels - 1 {
421                    dst_px[i] = srgb_u16_to_linear(src_px[i]);
422                }
423                dst_px[channels - 1] = src_px[channels - 1] as f32 / 65535.0;
424            }
425        } else {
426            for (s, d) in src.iter().zip(dst.iter_mut()) {
427                *d = srgb_u16_to_linear(*s);
428            }
429        }
430        if premul {
431            simd::premultiply_alpha_row(dst);
432        }
433    }
434
435    fn linear_f32_to_u16(
436        &self,
437        src: &[f32],
438        dst: &mut [u16],
439        _luts: &(),
440        channels: usize,
441        has_alpha: bool,
442        unpremul: bool,
443    ) {
444        // Encode uses the rational polynomial (not the LUT) for perfect roundtrip
445        // accuracy with the LUT decode path. The polynomial is 10× slower than the
446        // LUT but encode is rarely the bottleneck — decode dominates in pipelines
447        // that read u16 images (camera RAW, TIFF, PNG16).
448        let encode = |src: &[f32], dst: &mut [u16]| {
449            if has_alpha && channels >= 2 {
450                for (src_px, dst_px) in src
451                    .chunks_exact(channels)
452                    .zip(dst.chunks_exact_mut(channels))
453                {
454                    for i in 0..channels - 1 {
455                        dst_px[i] = (self.from_linear(src_px[i]) * 65535.0 + 0.5)
456                            .clamp(0.0, 65535.0) as u16;
457                    }
458                    dst_px[channels - 1] =
459                        (src_px[channels - 1] * 65535.0 + 0.5).clamp(0.0, 65535.0) as u16;
460                }
461            } else {
462                for (s, d) in src.iter().zip(dst.iter_mut()) {
463                    *d = (self.from_linear(*s) * 65535.0 + 0.5).clamp(0.0, 65535.0) as u16;
464                }
465            }
466        };
467
468        if unpremul {
469            unpremultiply_to_scratch(src, |s| encode(s, dst));
470        } else {
471            encode(src, dst);
472        }
473    }
474
475    fn u8_to_linear_i12(&self, src: &[u8], dst: &mut [i16], _luts: &()) {
476        color::srgb_u8_to_linear_i12_row(src, dst);
477    }
478
479    fn linear_i12_to_u8(&self, src: &[i16], dst: &mut [u8], _luts: &()) {
480        color::linear_i12_to_srgb_u8_row(src, dst);
481    }
482
483    fn f32_to_linear_inplace(
484        &self,
485        row: &mut [f32],
486        channels: usize,
487        has_alpha: bool,
488        premul: bool,
489    ) {
490        simd::srgb_to_linear_row(row, channels, has_alpha);
491        if premul {
492            simd::premultiply_alpha_row(row);
493        }
494    }
495
496    fn linear_to_f32_inplace(
497        &self,
498        row: &mut [f32],
499        channels: usize,
500        has_alpha: bool,
501        unpremul: bool,
502    ) {
503        if unpremul {
504            simd::unpremultiply_alpha_row(row);
505        }
506        simd::srgb_from_linear_row(row, channels, has_alpha);
507    }
508}
509
510// =============================================================================
511// Bt709 — BT.709/BT.601 transfer function
512// =============================================================================
513
514/// BT.709 transfer function (also used for BT.601).
515///
516/// Close to sRGB but with a different linear toe segment. For u8 input the
517/// difference from sRGB is small enough that LUT-based fast paths are not
518/// provided — the scalar curve is used via the default batch implementations.
519#[derive(Debug, Clone, Copy, Default)]
520pub struct Bt709;
521
522// BT.709 constants are in fastmath.rs (used by fast polynomial approximation).
523
524impl TransferCurve for Bt709 {
525    type Luts = ();
526
527    #[inline]
528    fn to_linear(&self, v: f32) -> f32 {
529        fastmath::bt709_to_linear(v)
530    }
531
532    #[inline]
533    fn from_linear(&self, v: f32) -> f32 {
534        fastmath::bt709_from_linear(v)
535    }
536
537    fn build_luts(&self) -> Self::Luts {}
538
539    fn u8_to_linear_f32(
540        &self,
541        src: &[u8],
542        dst: &mut [f32],
543        _luts: &(),
544        channels: usize,
545        has_alpha: bool,
546        premul: bool,
547    ) {
548        if has_alpha && channels >= 2 {
549            for (src_px, dst_px) in src
550                .chunks_exact(channels)
551                .zip(dst.chunks_exact_mut(channels))
552            {
553                for i in 0..channels - 1 {
554                    dst_px[i] = self.to_linear(src_px[i] as f32 / 255.0);
555                }
556                dst_px[channels - 1] = src_px[channels - 1] as f32 / 255.0;
557            }
558        } else {
559            for (s, d) in src.iter().zip(dst.iter_mut()) {
560                *d = self.to_linear(*s as f32 / 255.0);
561            }
562        }
563        if premul {
564            simd::premultiply_alpha_row(dst);
565        }
566    }
567
568    fn linear_f32_to_u8(
569        &self,
570        src: &[f32],
571        dst: &mut [u8],
572        _luts: &(),
573        channels: usize,
574        has_alpha: bool,
575        unpremul: bool,
576    ) {
577        let encode = |src: &[f32], dst: &mut [u8]| {
578            if has_alpha && channels >= 2 {
579                for (src_px, dst_px) in src
580                    .chunks_exact(channels)
581                    .zip(dst.chunks_exact_mut(channels))
582                {
583                    for i in 0..channels - 1 {
584                        dst_px[i] =
585                            (self.from_linear(src_px[i]) * 255.0 + 0.5).clamp(0.0, 255.0) as u8;
586                    }
587                    dst_px[channels - 1] =
588                        (src_px[channels - 1] * 255.0 + 0.5).clamp(0.0, 255.0) as u8;
589                }
590            } else {
591                for (s, d) in src.iter().zip(dst.iter_mut()) {
592                    *d = (self.from_linear(*s) * 255.0 + 0.5).clamp(0.0, 255.0) as u8;
593                }
594            }
595        };
596
597        if unpremul {
598            unpremultiply_to_scratch(src, |s| encode(s, dst));
599        } else {
600            encode(src, dst);
601        }
602    }
603
604    fn u16_to_linear_f32(
605        &self,
606        src: &[u16],
607        dst: &mut [f32],
608        _luts: &(),
609        channels: usize,
610        has_alpha: bool,
611        premul: bool,
612    ) {
613        if has_alpha && channels >= 2 {
614            for (src_px, dst_px) in src
615                .chunks_exact(channels)
616                .zip(dst.chunks_exact_mut(channels))
617            {
618                for i in 0..channels - 1 {
619                    dst_px[i] = self.to_linear(src_px[i] as f32 / 65535.0);
620                }
621                dst_px[channels - 1] = src_px[channels - 1] as f32 / 65535.0;
622            }
623        } else {
624            for (s, d) in src.iter().zip(dst.iter_mut()) {
625                *d = self.to_linear(*s as f32 / 65535.0);
626            }
627        }
628        if premul {
629            simd::premultiply_alpha_row(dst);
630        }
631    }
632
633    fn linear_f32_to_u16(
634        &self,
635        src: &[f32],
636        dst: &mut [u16],
637        _luts: &(),
638        channels: usize,
639        has_alpha: bool,
640        unpremul: bool,
641    ) {
642        let encode = |src: &[f32], dst: &mut [u16]| {
643            if has_alpha && channels >= 2 {
644                for (src_px, dst_px) in src
645                    .chunks_exact(channels)
646                    .zip(dst.chunks_exact_mut(channels))
647                {
648                    for i in 0..channels - 1 {
649                        dst_px[i] = (self.from_linear(src_px[i]) * 65535.0 + 0.5)
650                            .clamp(0.0, 65535.0) as u16;
651                    }
652                    dst_px[channels - 1] =
653                        (src_px[channels - 1] * 65535.0 + 0.5).clamp(0.0, 65535.0) as u16;
654                }
655            } else {
656                for (s, d) in src.iter().zip(dst.iter_mut()) {
657                    *d = (self.from_linear(*s) * 65535.0 + 0.5).clamp(0.0, 65535.0) as u16;
658                }
659            }
660        };
661
662        if unpremul {
663            unpremultiply_to_scratch(src, |s| encode(s, dst));
664        } else {
665            encode(src, dst);
666        }
667    }
668
669    fn u8_to_linear_i12(&self, src: &[u8], dst: &mut [i16], _luts: &()) {
670        for (s, d) in src.iter().zip(dst.iter_mut()) {
671            let linear = self.to_linear(*s as f32 / 255.0);
672            *d = (linear * 4095.0 + 0.5).clamp(0.0, 4095.0) as i16;
673        }
674    }
675
676    fn linear_i12_to_u8(&self, src: &[i16], dst: &mut [u8], _luts: &()) {
677        for (s, d) in src.iter().zip(dst.iter_mut()) {
678            let linear = (*s).clamp(0, 4095) as f32 / 4095.0;
679            *d = (self.from_linear(linear) * 255.0 + 0.5).clamp(0.0, 255.0) as u8;
680        }
681    }
682
683    fn f32_to_linear_inplace(
684        &self,
685        row: &mut [f32],
686        channels: usize,
687        has_alpha: bool,
688        premul: bool,
689    ) {
690        simd::bt709_to_linear_row(row, channels, has_alpha);
691        if premul {
692            simd::premultiply_alpha_row(row);
693        }
694    }
695
696    fn linear_to_f32_inplace(
697        &self,
698        row: &mut [f32],
699        channels: usize,
700        has_alpha: bool,
701        unpremul: bool,
702    ) {
703        if unpremul {
704            simd::unpremultiply_alpha_row(row);
705        }
706        simd::bt709_from_linear_row(row, channels, has_alpha);
707    }
708}
709
710// =============================================================================
711// Pq — SMPTE ST 2084 (PQ / HDR10) transfer function
712// =============================================================================
713
714/// SMPTE ST 2084 (PQ) transfer function for HDR10 content.
715///
716/// Maps normalized signal values [0, 1] to linear light [0, 10000] cd/m².
717/// For resize purposes, the pipeline works in normalized PQ signal space,
718/// so `to_linear` / `from_linear` map between [0, 1] signal and [0, 1]
719/// normalized linear (where 1.0 = 10000 cd/m²).
720#[derive(Debug, Clone, Copy, Default)]
721pub struct Pq;
722
723// PQ constants are in fastmath.rs (used by rational polynomial approximation).
724
725impl TransferCurve for Pq {
726    type Luts = ();
727
728    #[inline]
729    fn to_linear(&self, v: f32) -> f32 {
730        fastmath::pq_to_linear(v)
731    }
732
733    #[inline]
734    fn from_linear(&self, v: f32) -> f32 {
735        fastmath::pq_from_linear(v)
736    }
737
738    fn build_luts(&self) -> Self::Luts {}
739
740    fn u8_to_linear_f32(
741        &self,
742        src: &[u8],
743        dst: &mut [f32],
744        _luts: &(),
745        channels: usize,
746        has_alpha: bool,
747        premul: bool,
748    ) {
749        if has_alpha && channels >= 2 {
750            for (src_px, dst_px) in src
751                .chunks_exact(channels)
752                .zip(dst.chunks_exact_mut(channels))
753            {
754                for i in 0..channels - 1 {
755                    dst_px[i] = self.to_linear(src_px[i] as f32 / 255.0);
756                }
757                dst_px[channels - 1] = src_px[channels - 1] as f32 / 255.0;
758            }
759        } else {
760            for (s, d) in src.iter().zip(dst.iter_mut()) {
761                *d = self.to_linear(*s as f32 / 255.0);
762            }
763        }
764        if premul {
765            simd::premultiply_alpha_row(dst);
766        }
767    }
768
769    fn linear_f32_to_u8(
770        &self,
771        src: &[f32],
772        dst: &mut [u8],
773        _luts: &(),
774        channels: usize,
775        has_alpha: bool,
776        unpremul: bool,
777    ) {
778        let encode = |src: &[f32], dst: &mut [u8]| {
779            if has_alpha && channels >= 2 {
780                for (src_px, dst_px) in src
781                    .chunks_exact(channels)
782                    .zip(dst.chunks_exact_mut(channels))
783                {
784                    for i in 0..channels - 1 {
785                        dst_px[i] =
786                            (self.from_linear(src_px[i]) * 255.0 + 0.5).clamp(0.0, 255.0) as u8;
787                    }
788                    dst_px[channels - 1] =
789                        (src_px[channels - 1] * 255.0 + 0.5).clamp(0.0, 255.0) as u8;
790                }
791            } else {
792                for (s, d) in src.iter().zip(dst.iter_mut()) {
793                    *d = (self.from_linear(*s) * 255.0 + 0.5).clamp(0.0, 255.0) as u8;
794                }
795            }
796        };
797
798        if unpremul {
799            unpremultiply_to_scratch(src, |s| encode(s, dst));
800        } else {
801            encode(src, dst);
802        }
803    }
804
805    fn u16_to_linear_f32(
806        &self,
807        src: &[u16],
808        dst: &mut [f32],
809        _luts: &(),
810        channels: usize,
811        has_alpha: bool,
812        premul: bool,
813    ) {
814        if has_alpha && channels >= 2 {
815            for (src_px, dst_px) in src
816                .chunks_exact(channels)
817                .zip(dst.chunks_exact_mut(channels))
818            {
819                for i in 0..channels - 1 {
820                    dst_px[i] = self.to_linear(src_px[i] as f32 / 65535.0);
821                }
822                dst_px[channels - 1] = src_px[channels - 1] as f32 / 65535.0;
823            }
824        } else {
825            for (s, d) in src.iter().zip(dst.iter_mut()) {
826                *d = self.to_linear(*s as f32 / 65535.0);
827            }
828        }
829        if premul {
830            simd::premultiply_alpha_row(dst);
831        }
832    }
833
834    fn linear_f32_to_u16(
835        &self,
836        src: &[f32],
837        dst: &mut [u16],
838        _luts: &(),
839        channels: usize,
840        has_alpha: bool,
841        unpremul: bool,
842    ) {
843        let encode = |src: &[f32], dst: &mut [u16]| {
844            if has_alpha && channels >= 2 {
845                for (src_px, dst_px) in src
846                    .chunks_exact(channels)
847                    .zip(dst.chunks_exact_mut(channels))
848                {
849                    for i in 0..channels - 1 {
850                        dst_px[i] = (self.from_linear(src_px[i]) * 65535.0 + 0.5)
851                            .clamp(0.0, 65535.0) as u16;
852                    }
853                    dst_px[channels - 1] =
854                        (src_px[channels - 1] * 65535.0 + 0.5).clamp(0.0, 65535.0) as u16;
855                }
856            } else {
857                for (s, d) in src.iter().zip(dst.iter_mut()) {
858                    *d = (self.from_linear(*s) * 65535.0 + 0.5).clamp(0.0, 65535.0) as u16;
859                }
860            }
861        };
862
863        if unpremul {
864            unpremultiply_to_scratch(src, |s| encode(s, dst));
865        } else {
866            encode(src, dst);
867        }
868    }
869
870    fn u8_to_linear_i12(&self, src: &[u8], dst: &mut [i16], _luts: &()) {
871        for (s, d) in src.iter().zip(dst.iter_mut()) {
872            let linear = self.to_linear(*s as f32 / 255.0);
873            *d = (linear * 4095.0 + 0.5).clamp(0.0, 4095.0) as i16;
874        }
875    }
876
877    fn linear_i12_to_u8(&self, src: &[i16], dst: &mut [u8], _luts: &()) {
878        for (s, d) in src.iter().zip(dst.iter_mut()) {
879            let linear = (*s).clamp(0, 4095) as f32 / 4095.0;
880            *d = (self.from_linear(linear) * 255.0 + 0.5).clamp(0.0, 255.0) as u8;
881        }
882    }
883
884    fn f32_to_linear_inplace(
885        &self,
886        row: &mut [f32],
887        channels: usize,
888        has_alpha: bool,
889        premul: bool,
890    ) {
891        simd::pq_to_linear_row(row, channels, has_alpha);
892        if premul {
893            simd::premultiply_alpha_row(row);
894        }
895    }
896
897    fn linear_to_f32_inplace(
898        &self,
899        row: &mut [f32],
900        channels: usize,
901        has_alpha: bool,
902        unpremul: bool,
903    ) {
904        if unpremul {
905            simd::unpremultiply_alpha_row(row);
906        }
907        simd::pq_from_linear_row(row, channels, has_alpha);
908    }
909}
910
911// =============================================================================
912// Hlg — ARIB STD-B67 (HLG) transfer function
913// =============================================================================
914
/// HLG (Hybrid Log-Gamma, ARIB STD-B67) transfer function for broadcast HDR.
///
/// The OETF maps linear light [0, 1] to signal [0, 1]. `to_linear` is the
/// inverse OETF (signal → scene linear). `from_linear` is the OETF (scene
/// linear → signal). Both delegate to the fast approximations in `fastmath`.
#[derive(Debug, Clone, Copy, Default)]
pub struct Hlg;

// HLG constants are in fastmath.rs (used by fast log2/pow2 approximation).
924
925impl TransferCurve for Hlg {
926    type Luts = ();
927
928    #[inline]
929    fn to_linear(&self, v: f32) -> f32 {
930        fastmath::hlg_to_linear(v)
931    }
932
933    #[inline]
934    fn from_linear(&self, v: f32) -> f32 {
935        fastmath::hlg_from_linear(v)
936    }
937
938    fn build_luts(&self) -> Self::Luts {}
939
940    fn u8_to_linear_f32(
941        &self,
942        src: &[u8],
943        dst: &mut [f32],
944        _luts: &(),
945        channels: usize,
946        has_alpha: bool,
947        premul: bool,
948    ) {
949        if has_alpha && channels >= 2 {
950            for (src_px, dst_px) in src
951                .chunks_exact(channels)
952                .zip(dst.chunks_exact_mut(channels))
953            {
954                for i in 0..channels - 1 {
955                    dst_px[i] = self.to_linear(src_px[i] as f32 / 255.0);
956                }
957                dst_px[channels - 1] = src_px[channels - 1] as f32 / 255.0;
958            }
959        } else {
960            for (s, d) in src.iter().zip(dst.iter_mut()) {
961                *d = self.to_linear(*s as f32 / 255.0);
962            }
963        }
964        if premul {
965            simd::premultiply_alpha_row(dst);
966        }
967    }
968
969    fn linear_f32_to_u8(
970        &self,
971        src: &[f32],
972        dst: &mut [u8],
973        _luts: &(),
974        channels: usize,
975        has_alpha: bool,
976        unpremul: bool,
977    ) {
978        let encode = |src: &[f32], dst: &mut [u8]| {
979            if has_alpha && channels >= 2 {
980                for (src_px, dst_px) in src
981                    .chunks_exact(channels)
982                    .zip(dst.chunks_exact_mut(channels))
983                {
984                    for i in 0..channels - 1 {
985                        dst_px[i] =
986                            (self.from_linear(src_px[i]) * 255.0 + 0.5).clamp(0.0, 255.0) as u8;
987                    }
988                    dst_px[channels - 1] =
989                        (src_px[channels - 1] * 255.0 + 0.5).clamp(0.0, 255.0) as u8;
990                }
991            } else {
992                for (s, d) in src.iter().zip(dst.iter_mut()) {
993                    *d = (self.from_linear(*s) * 255.0 + 0.5).clamp(0.0, 255.0) as u8;
994                }
995            }
996        };
997
998        if unpremul {
999            unpremultiply_to_scratch(src, |s| encode(s, dst));
1000        } else {
1001            encode(src, dst);
1002        }
1003    }
1004
1005    fn u16_to_linear_f32(
1006        &self,
1007        src: &[u16],
1008        dst: &mut [f32],
1009        _luts: &(),
1010        channels: usize,
1011        has_alpha: bool,
1012        premul: bool,
1013    ) {
1014        if has_alpha && channels >= 2 {
1015            for (src_px, dst_px) in src
1016                .chunks_exact(channels)
1017                .zip(dst.chunks_exact_mut(channels))
1018            {
1019                for i in 0..channels - 1 {
1020                    dst_px[i] = self.to_linear(src_px[i] as f32 / 65535.0);
1021                }
1022                dst_px[channels - 1] = src_px[channels - 1] as f32 / 65535.0;
1023            }
1024        } else {
1025            for (s, d) in src.iter().zip(dst.iter_mut()) {
1026                *d = self.to_linear(*s as f32 / 65535.0);
1027            }
1028        }
1029        if premul {
1030            simd::premultiply_alpha_row(dst);
1031        }
1032    }
1033
1034    fn linear_f32_to_u16(
1035        &self,
1036        src: &[f32],
1037        dst: &mut [u16],
1038        _luts: &(),
1039        channels: usize,
1040        has_alpha: bool,
1041        unpremul: bool,
1042    ) {
1043        let encode = |src: &[f32], dst: &mut [u16]| {
1044            if has_alpha && channels >= 2 {
1045                for (src_px, dst_px) in src
1046                    .chunks_exact(channels)
1047                    .zip(dst.chunks_exact_mut(channels))
1048                {
1049                    for i in 0..channels - 1 {
1050                        dst_px[i] = (self.from_linear(src_px[i]) * 65535.0 + 0.5)
1051                            .clamp(0.0, 65535.0) as u16;
1052                    }
1053                    dst_px[channels - 1] =
1054                        (src_px[channels - 1] * 65535.0 + 0.5).clamp(0.0, 65535.0) as u16;
1055                }
1056            } else {
1057                for (s, d) in src.iter().zip(dst.iter_mut()) {
1058                    *d = (self.from_linear(*s) * 65535.0 + 0.5).clamp(0.0, 65535.0) as u16;
1059                }
1060            }
1061        };
1062
1063        if unpremul {
1064            unpremultiply_to_scratch(src, |s| encode(s, dst));
1065        } else {
1066            encode(src, dst);
1067        }
1068    }
1069
1070    fn u8_to_linear_i12(&self, src: &[u8], dst: &mut [i16], _luts: &()) {
1071        for (s, d) in src.iter().zip(dst.iter_mut()) {
1072            let linear = self.to_linear(*s as f32 / 255.0);
1073            *d = (linear * 4095.0 + 0.5).clamp(0.0, 4095.0) as i16;
1074        }
1075    }
1076
1077    fn linear_i12_to_u8(&self, src: &[i16], dst: &mut [u8], _luts: &()) {
1078        for (s, d) in src.iter().zip(dst.iter_mut()) {
1079            let linear = (*s).clamp(0, 4095) as f32 / 4095.0;
1080            *d = (self.from_linear(linear) * 255.0 + 0.5).clamp(0.0, 255.0) as u8;
1081        }
1082    }
1083
1084    fn f32_to_linear_inplace(
1085        &self,
1086        row: &mut [f32],
1087        channels: usize,
1088        has_alpha: bool,
1089        premul: bool,
1090    ) {
1091        simd::hlg_to_linear_row(row, channels, has_alpha);
1092        if premul {
1093            simd::premultiply_alpha_row(row);
1094        }
1095    }
1096
1097    fn linear_to_f32_inplace(
1098        &self,
1099        row: &mut [f32],
1100        channels: usize,
1101        has_alpha: bool,
1102        unpremul: bool,
1103    ) {
1104        if unpremul {
1105            simd::unpremultiply_alpha_row(row);
1106        }
1107        simd::hlg_from_linear_row(row, channels, has_alpha);
1108    }
1109}
1110
1111// =============================================================================
1112// Tests
1113// =============================================================================
1114
#[cfg(test)]
mod tests {
    use super::*;
    #[cfg(not(feature = "std"))]
    use alloc::{vec, vec::Vec};

    /// Identity TF: every u8 value must survive a f32 round trip exactly.
    #[test]
    fn no_transfer_roundtrip_u8() {
        let tf = NoTransfer;
        tf.build_luts();
        let encoded: Vec<u8> = (0..=255).collect();
        let mut linear = vec![0.0f32; 256];
        let mut decoded = vec![0u8; 256];

        tf.u8_to_linear_f32(&encoded, &mut linear, &(), 1, false, false);
        tf.linear_f32_to_u8(&linear, &mut decoded, &(), 1, false, false);

        for (i, (&before, &after)) in encoded.iter().zip(decoded.iter()).enumerate() {
            assert_eq!(before, after, "NoTransfer roundtrip mismatch at {}", i);
        }
    }

    /// sRGB u8 → linear f32 → u8 must stay within ±1 code value everywhere.
    #[test]
    fn srgb_roundtrip_u8() {
        let tf = Srgb;
        tf.build_luts();
        let encoded: Vec<u8> = (0..=255).collect();
        let mut linear = vec![0.0f32; 256];
        let mut decoded = vec![0u8; 256];

        tf.u8_to_linear_f32(&encoded, &mut linear, &(), 1, false, false);
        tf.linear_f32_to_u8(&linear, &mut decoded, &(), 1, false, false);

        for (i, (&before, &after)) in encoded.iter().zip(decoded.iter()).enumerate() {
            let diff = (i16::from(before) - i16::from(after)).unsigned_abs();
            assert!(diff <= 1, "sRGB roundtrip off by {} at {}", diff, i);
        }
    }

    /// sRGB u16 round trip over a spread of values. Exact LUT forward plus
    /// polynomial inverse should land within ±1 of the original after the
    /// f32 intermediate rounding.
    #[test]
    fn srgb_roundtrip_u16() {
        let tf = Srgb;
        tf.build_luts();

        // 256 evenly spaced u16 values: 0, 257, 514, …, 65535.
        let values: Vec<u16> = (0u32..=65535).step_by(257).map(|v| v as u16).collect();
        let mut linear = vec![0.0f32; values.len()];
        let mut decoded = vec![0u16; values.len()];

        tf.u16_to_linear_f32(&values, &mut linear, &(), 1, false, false);
        tf.linear_f32_to_u16(&linear, &mut decoded, &(), 1, false, false);

        for i in 0..values.len() {
            let diff = (i32::from(values[i]) - i32::from(decoded[i])).unsigned_abs();
            assert!(
                diff <= 1,
                "sRGB u16 roundtrip off by {} at value {}: {} -> {} -> {}",
                diff,
                values[i],
                values[i],
                linear[i],
                decoded[i]
            );
        }
    }

    /// The TF's i12 path must agree bit-for-bit with the direct LUT helper.
    #[test]
    fn srgb_i12_matches_existing() {
        let tf = Srgb;
        tf.build_luts();

        let src: Vec<u8> = (0..=255).collect();
        let mut via_tf = vec![0i16; 256];
        let mut via_direct = vec![0i16; 256];

        tf.u8_to_linear_i12(&src, &mut via_tf, &());
        crate::color::srgb_u8_to_linear_i12_row(&src, &mut via_direct);

        assert_eq!(via_tf, via_direct, "TF i12 path should match direct LUT");
    }

    /// Scalar `to_linear` must agree with the LUT-driven batch path.
    #[test]
    fn srgb_scalar_matches_lut() {
        let tf = Srgb;
        for i in 0..=255u8 {
            let scalar = tf.to_linear(f32::from(i) / 255.0);
            let mut lut = [0.0f32];
            crate::color::srgb_u8_to_linear_f32(&[i], &mut lut, 1, false);

            let diff = (scalar - lut[0]).abs();
            assert!(
                diff < 1e-5,
                "sRGB scalar vs LUT mismatch at {}: scalar={}, lut={}",
                i,
                scalar,
                lut[0]
            );
        }
    }

    #[test]
    fn no_transfer_identity() {
        let tf = NoTransfer;
        assert!(tf.is_identity());
        assert_eq!(tf.to_linear(0.5), 0.5);
        assert_eq!(tf.from_linear(0.5), 0.5);
    }

    #[test]
    fn srgb_not_identity() {
        let tf = Srgb;
        assert!(!tf.is_identity());
        // Encoded mid-gray (0.5) should decode to roughly 0.214 linear.
        let linear = tf.to_linear(0.5);
        assert!(
            (linear - 0.214).abs() < 0.01,
            "sRGB 0.5 → linear = {} (expected ~0.214)",
            linear
        );
    }

    /// With `has_alpha`, RGB is linearized but alpha is only scaled by 1/255.
    #[test]
    fn srgb_u8_to_f32_with_alpha() {
        let tf = Srgb;
        tf.build_luts();

        let src = [128u8, 64, 32, 200];
        let mut dst = [0.0f32; 4];

        tf.u8_to_linear_f32(&src, &mut dst, &(), 4, true, false);

        assert!(dst[0] > 0.2 && dst[0] < 0.3, "R linear: {}", dst[0]);
        assert!((dst[3] - 200.0 / 255.0).abs() < 0.01, "A: {}", dst[3]);
    }

    /// Encode with premultiply, decode with unpremultiply — should land on
    /// the original pixel (±1 per channel).
    #[test]
    fn srgb_u8_premul_unpremul_roundtrip() {
        let tf = Srgb;
        tf.build_luts();

        let src = [128u8, 64, 32, 200];
        let mut linear = [0.0f32; 4];
        let mut decoded = [0u8; 4];

        tf.u8_to_linear_f32(&src, &mut linear, &(), 4, true, true);
        tf.linear_f32_to_u8(&linear, &mut decoded, &(), 4, true, true);

        for (i, (&before, &after)) in src.iter().zip(decoded.iter()).enumerate() {
            let diff = (i16::from(before) - i16::from(after)).unsigned_abs();
            assert!(
                diff <= 1,
                "Premul roundtrip off by {} at channel {}: {} vs {}",
                diff,
                i,
                before,
                after
            );
        }
    }

    /// Alpha-free 3-channel data round-trips like single-channel data.
    #[test]
    fn no_alpha_3ch_roundtrip() {
        let tf = Srgb;
        tf.build_luts();

        let src = [128u8, 64, 32, 200, 100, 50]; // two RGB pixels
        let mut linear = [0.0f32; 6];
        let mut decoded = [0u8; 6];

        tf.u8_to_linear_f32(&src, &mut linear, &(), 3, false, false);
        tf.linear_f32_to_u8(&linear, &mut decoded, &(), 3, false, false);

        for (i, (&before, &after)) in src.iter().zip(decoded.iter()).enumerate() {
            let diff = (i16::from(before) - i16::from(after)).unsigned_abs();
            assert!(
                diff <= 1,
                "3ch roundtrip off by {} at {}: {} vs {}",
                diff,
                i,
                before,
                after
            );
        }
    }

    // === BT.709 tests ===

    #[test]
    fn bt709_roundtrip_u8() {
        let tf = Bt709;
        tf.build_luts();
        let encoded: Vec<u8> = (0..=255).collect();
        let mut linear = vec![0.0f32; 256];
        let mut decoded = vec![0u8; 256];

        tf.u8_to_linear_f32(&encoded, &mut linear, &(), 1, false, false);
        tf.linear_f32_to_u8(&linear, &mut decoded, &(), 1, false, false);

        for (i, (&before, &after)) in encoded.iter().zip(decoded.iter()).enumerate() {
            let diff = (i16::from(before) - i16::from(after)).unsigned_abs();
            assert!(diff <= 1, "BT.709 roundtrip off by {} at {}", diff, i);
        }
    }

    /// Non-negative at code 0 and non-decreasing across all 256 codes.
    #[test]
    fn bt709_monotonic() {
        let tf = Bt709;
        let mut prev = 0.0f32;
        for (i, lin) in (0..=255u8)
            .map(|i| tf.to_linear(f32::from(i) / 255.0))
            .enumerate()
        {
            assert!(
                lin >= prev,
                "BT.709 to_linear not monotonic at {}: {} < {}",
                i,
                lin,
                prev
            );
            prev = lin;
        }
    }

    #[test]
    fn bt709_endpoints() {
        let tf = Bt709;
        assert!(tf.to_linear(0.0).abs() < 1e-7);
        assert!((tf.to_linear(1.0) - 1.0).abs() < 1e-5);
        assert!(tf.from_linear(0.0).abs() < 1e-7);
        assert!((tf.from_linear(1.0) - 1.0).abs() < 1e-5);
    }

    // === PQ tests ===

    #[test]
    fn pq_roundtrip_u8() {
        let tf = Pq;
        tf.build_luts();
        let encoded: Vec<u8> = (0..=255).collect();
        let mut linear = vec![0.0f32; 256];
        let mut decoded = vec![0u8; 256];

        tf.u8_to_linear_f32(&encoded, &mut linear, &(), 1, false, false);
        tf.linear_f32_to_u8(&linear, &mut decoded, &(), 1, false, false);

        for (i, (&before, &after)) in encoded.iter().zip(decoded.iter()).enumerate() {
            let diff = (i16::from(before) - i16::from(after)).unsigned_abs();
            assert!(diff <= 1, "PQ roundtrip off by {} at {}", diff, i);
        }
    }

    /// Non-negative at code 0 and non-decreasing across all 256 codes.
    #[test]
    fn pq_monotonic() {
        let tf = Pq;
        let mut prev = 0.0f32;
        for (i, lin) in (0..=255u8)
            .map(|i| tf.to_linear(f32::from(i) / 255.0))
            .enumerate()
        {
            assert!(
                lin >= prev,
                "PQ to_linear not monotonic at {}: {} < {}",
                i,
                lin,
                prev
            );
            prev = lin;
        }
    }

    #[test]
    fn pq_endpoints() {
        let tf = Pq;
        assert!(tf.to_linear(0.0).abs() < 1e-7);
        assert!((tf.to_linear(1.0) - 1.0).abs() < 1e-4);
        assert!(tf.from_linear(0.0).abs() < 1e-7);
        assert!((tf.from_linear(1.0) - 1.0).abs() < 1e-4);
    }

    // === HLG tests ===

    #[test]
    fn hlg_roundtrip_u8() {
        let tf = Hlg;
        tf.build_luts();
        let encoded: Vec<u8> = (0..=255).collect();
        let mut linear = vec![0.0f32; 256];
        let mut decoded = vec![0u8; 256];

        tf.u8_to_linear_f32(&encoded, &mut linear, &(), 1, false, false);
        tf.linear_f32_to_u8(&linear, &mut decoded, &(), 1, false, false);

        for (i, (&before, &after)) in encoded.iter().zip(decoded.iter()).enumerate() {
            let diff = (i16::from(before) - i16::from(after)).unsigned_abs();
            assert!(diff <= 1, "HLG roundtrip off by {} at {}", diff, i);
        }
    }

    /// Non-negative at code 0 and non-decreasing across all 256 codes.
    #[test]
    fn hlg_monotonic() {
        let tf = Hlg;
        let mut prev = 0.0f32;
        for (i, lin) in (0..=255u8)
            .map(|i| tf.to_linear(f32::from(i) / 255.0))
            .enumerate()
        {
            assert!(
                lin >= prev,
                "HLG to_linear not monotonic at {}: {} < {}",
                i,
                lin,
                prev
            );
            prev = lin;
        }
    }

    #[test]
    fn hlg_endpoints() {
        let tf = Hlg;
        assert!(tf.to_linear(0.0).abs() < 1e-7);
        // The inverse OETF at signal = 1.0 yields some positive scene-linear
        // value (its magnitude depends on the HLG constants); we only require
        // positivity and that the forward OETF maps it back to 1.0.
        let at_one = tf.to_linear(1.0);
        assert!(at_one > 0.0, "HLG(1.0) should be positive: {}", at_one);
        let back = tf.from_linear(at_one);
        assert!(
            (back - 1.0).abs() < 1e-5,
            "HLG roundtrip at 1.0: {} -> {} -> {}",
            1.0,
            at_one,
            back
        );
    }

    // === CICP constructor tests ===

    /// Known CICP transfer-characteristic codes map to the expected variants.
    #[test]
    fn cicp_transfer_known_codes() {
        use zenpixels::TransferFunction;
        assert_eq!(TransferFunction::from_cicp(1), Some(TransferFunction::Bt709));
        // TC=6 (BT.601) is not mapped by zenpixels.
        assert_eq!(TransferFunction::from_cicp(6), None);
        assert_eq!(TransferFunction::from_cicp(8), Some(TransferFunction::Linear));
        assert_eq!(TransferFunction::from_cicp(13), Some(TransferFunction::Srgb));
        assert_eq!(TransferFunction::from_cicp(16), Some(TransferFunction::Pq));
        assert_eq!(TransferFunction::from_cicp(18), Some(TransferFunction::Hlg));
    }

    /// Reserved/unknown CICP codes map to `None`.
    #[test]
    fn cicp_transfer_unknown_codes() {
        use zenpixels::TransferFunction;
        assert_eq!(TransferFunction::from_cicp(0), None);
        assert_eq!(TransferFunction::from_cicp(2), None);
        assert_eq!(TransferFunction::from_cicp(255), None);
    }
}
1483}