1#[cfg(not(feature = "std"))]
22use alloc::{vec, vec::Vec};
23#[cfg(feature = "std")]
24use core::cell::RefCell;
25
26use crate::color;
27use crate::fastmath;
28use crate::simd;
29
/// Copies `src` into a per-thread scratch buffer, un-premultiplies alpha in
/// place, and hands the resulting slice to `f`.
///
/// The scratch `Vec` is reused across calls on the same thread, so once it has
/// grown to the largest row seen so far, steady-state calls allocate nothing.
#[cfg(feature = "std")]
fn unpremultiply_to_scratch(src: &[f32], f: impl FnOnce(&[f32])) {
    thread_local! {
        static SCRATCH: RefCell<Vec<f32>> = const { RefCell::new(Vec::new()) };
    }
    SCRATCH.with(|cell| {
        let mut buf = cell.borrow_mut();
        buf.clear();
        buf.extend_from_slice(src);
        simd::unpremultiply_alpha_row(&mut buf);
        f(&buf);
    });
}
52
53#[cfg(not(feature = "std"))]
54fn unpremultiply_to_scratch(src: &[f32], f: impl FnOnce(&[f32])) {
55 let mut tmp = src.to_vec();
56 simd::unpremultiply_alpha_row(&mut tmp);
57 f(&tmp);
58}
59
/// A transfer function (tone curve) mapping between encoded sample values and
/// linear light, together with bulk row-conversion entry points.
///
/// Implementors provide scalar conversions (`to_linear`/`from_linear`) plus
/// batched converters for interleaved rows. When `has_alpha` is true, alpha is
/// the last of `channels` and is never run through the curve.
///
/// NOTE(review): the premul/unpremul paths call `simd::premultiply_alpha_row`
/// / `simd::unpremultiply_alpha_row` without passing `channels`, which
/// suggests those helpers assume a fixed (RGBA-like) layout — confirm before
/// using the premul flags with other channel counts.
pub trait TransferCurve: Send + Sync + 'static {
    /// Implementation-defined lookup tables built by `build_luts` and threaded
    /// back into the row converters. `()` for curves that need none.
    type Luts: Send + Sync;

    /// Converts one encoded sample to linear light.
    fn to_linear(&self, encoded: f32) -> f32;

    /// Converts one linear-light sample back to its encoded form.
    // `from_*` normally names a constructor; this is a value conversion.
    #[allow(clippy::wrong_self_convention)]
    fn from_linear(&self, linear: f32) -> f32;

    /// Returns `true` when the curve is a no-op (encode == decode == identity).
    fn is_identity(&self) -> bool {
        false
    }

    /// Builds the lookup tables consumed by the row converters.
    fn build_luts(&self) -> Self::Luts;

    /// Decodes a row of `u8` samples into linear `f32`; optionally
    /// premultiplies by alpha afterwards.
    fn u8_to_linear_f32(
        &self,
        src: &[u8],
        dst: &mut [f32],
        luts: &Self::Luts,
        channels: usize,
        has_alpha: bool,
        premul: bool,
    );

    /// Encodes a row of linear `f32` samples into `u8`; optionally
    /// un-premultiplies alpha first.
    fn linear_f32_to_u8(
        &self,
        src: &[f32],
        dst: &mut [u8],
        luts: &Self::Luts,
        channels: usize,
        has_alpha: bool,
        unpremul: bool,
    );

    /// Decodes a row of `u16` samples into linear `f32`; optionally
    /// premultiplies by alpha afterwards.
    fn u16_to_linear_f32(
        &self,
        src: &[u16],
        dst: &mut [f32],
        luts: &Self::Luts,
        channels: usize,
        has_alpha: bool,
        premul: bool,
    );

    /// Encodes a row of linear `f32` samples into `u16`; optionally
    /// un-premultiplies alpha first.
    fn linear_f32_to_u16(
        &self,
        src: &[f32],
        dst: &mut [u16],
        luts: &Self::Luts,
        channels: usize,
        has_alpha: bool,
        unpremul: bool,
    );

    /// Decodes `u8` samples into a 12-bit (0..=4095) linear fixed-point row.
    fn u8_to_linear_i12(&self, src: &[u8], dst: &mut [i16], luts: &Self::Luts);

    /// Encodes a 12-bit linear fixed-point row back into `u8` samples.
    fn linear_i12_to_u8(&self, src: &[i16], dst: &mut [u8], luts: &Self::Luts);

    /// Decodes an `f32` row in place (encoded -> linear), optionally
    /// premultiplying by alpha afterwards.
    fn f32_to_linear_inplace(
        &self,
        row: &mut [f32],
        channels: usize,
        has_alpha: bool,
        premul: bool,
    );

    /// Encodes an `f32` row in place (linear -> encoded), optionally
    /// un-premultiplying alpha first.
    fn linear_to_f32_inplace(
        &self,
        row: &mut [f32],
        channels: usize,
        has_alpha: bool,
        unpremul: bool,
    );
}
190
/// Identity transfer curve: encoded values are already linear, so conversions
/// reduce to integer<->float rescaling plus optional alpha handling.
#[derive(Debug, Clone, Copy, Default)]
pub struct NoTransfer;
202
203impl TransferCurve for NoTransfer {
204 type Luts = ();
205
206 #[inline]
207 fn to_linear(&self, v: f32) -> f32 {
208 v
209 }
210
211 #[inline]
212 fn from_linear(&self, v: f32) -> f32 {
213 v
214 }
215
216 #[inline]
217 fn is_identity(&self) -> bool {
218 true
219 }
220
221 #[inline]
222 fn build_luts(&self) -> Self::Luts {}
223
224 fn u8_to_linear_f32(
225 &self,
226 src: &[u8],
227 dst: &mut [f32],
228 _luts: &(),
229 _channels: usize,
230 _has_alpha: bool,
231 premul: bool,
232 ) {
233 simd::u8_to_f32_row(src, dst);
234 if premul {
235 simd::premultiply_alpha_row(dst);
236 }
237 }
238
239 fn linear_f32_to_u8(
240 &self,
241 src: &[f32],
242 dst: &mut [u8],
243 _luts: &(),
244 _channels: usize,
245 _has_alpha: bool,
246 unpremul: bool,
247 ) {
248 if unpremul {
249 unpremultiply_to_scratch(src, |s| simd::f32_to_u8_row(s, dst));
250 } else {
251 simd::f32_to_u8_row(src, dst);
252 }
253 }
254
255 fn u16_to_linear_f32(
256 &self,
257 src: &[u16],
258 dst: &mut [f32],
259 _luts: &(),
260 _channels: usize,
261 _has_alpha: bool,
262 premul: bool,
263 ) {
264 for (s, d) in src.iter().zip(dst.iter_mut()) {
265 *d = *s as f32 / 65535.0;
266 }
267 if premul {
268 simd::premultiply_alpha_row(dst);
269 }
270 }
271
272 fn linear_f32_to_u16(
273 &self,
274 src: &[f32],
275 dst: &mut [u16],
276 _luts: &(),
277 _channels: usize,
278 _has_alpha: bool,
279 unpremul: bool,
280 ) {
281 if unpremul {
282 unpremultiply_to_scratch(src, |s| {
283 for (sv, d) in s.iter().zip(dst.iter_mut()) {
284 *d = (*sv * 65535.0 + 0.5).clamp(0.0, 65535.0) as u16;
285 }
286 });
287 } else {
288 for (s, d) in src.iter().zip(dst.iter_mut()) {
289 *d = (*s * 65535.0 + 0.5).clamp(0.0, 65535.0) as u16;
290 }
291 }
292 }
293
294 fn u8_to_linear_i12(&self, src: &[u8], dst: &mut [i16], _luts: &()) {
295 for (s, d) in src.iter().zip(dst.iter_mut()) {
297 *d = ((*s as u32 * 4095 + 127) / 255) as i16;
299 }
300 }
301
302 fn linear_i12_to_u8(&self, src: &[i16], dst: &mut [u8], _luts: &()) {
303 for (s, d) in src.iter().zip(dst.iter_mut()) {
305 let clamped = (*s).clamp(0, 4095) as u32;
306 *d = ((clamped * 255 + 2047) / 4095) as u8;
307 }
308 }
309
310 fn f32_to_linear_inplace(
311 &self,
312 row: &mut [f32],
313 _channels: usize,
314 _has_alpha: bool,
315 premul: bool,
316 ) {
317 if premul {
318 simd::premultiply_alpha_row(row);
319 }
320 }
321
322 fn linear_to_f32_inplace(
323 &self,
324 row: &mut [f32],
325 _channels: usize,
326 _has_alpha: bool,
327 unpremul: bool,
328 ) {
329 if unpremul {
330 simd::unpremultiply_alpha_row(row);
331 }
332 }
333}
334
/// The sRGB transfer curve, delegating scalar math to `fastmath::srgb_*` and
/// u8 row conversion to the LUT-based helpers in `color`.
#[derive(Debug, Clone, Copy, Default)]
pub struct Srgb;
349
350impl TransferCurve for Srgb {
351 type Luts = ();
352
353 #[inline]
354 fn to_linear(&self, encoded: f32) -> f32 {
355 fastmath::srgb_to_linear(encoded)
356 }
357
358 #[inline]
359 fn from_linear(&self, linear: f32) -> f32 {
360 fastmath::srgb_from_linear(linear)
361 }
362
363 #[inline]
364 fn build_luts(&self) -> Self::Luts {
365 }
367
368 fn u8_to_linear_f32(
369 &self,
370 src: &[u8],
371 dst: &mut [f32],
372 _luts: &(),
373 channels: usize,
374 has_alpha: bool,
375 premul: bool,
376 ) {
377 color::srgb_u8_to_linear_f32(src, dst, channels, has_alpha);
378 if premul {
379 simd::premultiply_alpha_row(dst);
380 }
381 }
382
383 fn linear_f32_to_u8(
384 &self,
385 src: &[f32],
386 dst: &mut [u8],
387 _luts: &(),
388 channels: usize,
389 has_alpha: bool,
390 unpremul: bool,
391 ) {
392 if unpremul {
393 unpremultiply_to_scratch(src, |s| {
394 color::linear_f32_to_srgb_u8(s, dst, channels, has_alpha);
395 });
396 } else {
397 color::linear_f32_to_srgb_u8(src, dst, channels, has_alpha);
398 }
399 }
400
401 fn u16_to_linear_f32(
402 &self,
403 src: &[u16],
404 dst: &mut [f32],
405 _luts: &(),
406 channels: usize,
407 has_alpha: bool,
408 premul: bool,
409 ) {
410 use linear_srgb::default::{srgb_u16_to_linear, srgb_u16_to_linear_rgba_slice};
412
413 if has_alpha && channels == 4 {
414 srgb_u16_to_linear_rgba_slice(src, dst);
415 } else if has_alpha && channels >= 2 {
416 for (src_px, dst_px) in src
417 .chunks_exact(channels)
418 .zip(dst.chunks_exact_mut(channels))
419 {
420 for i in 0..channels - 1 {
421 dst_px[i] = srgb_u16_to_linear(src_px[i]);
422 }
423 dst_px[channels - 1] = src_px[channels - 1] as f32 / 65535.0;
424 }
425 } else {
426 for (s, d) in src.iter().zip(dst.iter_mut()) {
427 *d = srgb_u16_to_linear(*s);
428 }
429 }
430 if premul {
431 simd::premultiply_alpha_row(dst);
432 }
433 }
434
435 fn linear_f32_to_u16(
436 &self,
437 src: &[f32],
438 dst: &mut [u16],
439 _luts: &(),
440 channels: usize,
441 has_alpha: bool,
442 unpremul: bool,
443 ) {
444 let encode = |src: &[f32], dst: &mut [u16]| {
449 if has_alpha && channels >= 2 {
450 for (src_px, dst_px) in src
451 .chunks_exact(channels)
452 .zip(dst.chunks_exact_mut(channels))
453 {
454 for i in 0..channels - 1 {
455 dst_px[i] = (self.from_linear(src_px[i]) * 65535.0 + 0.5)
456 .clamp(0.0, 65535.0) as u16;
457 }
458 dst_px[channels - 1] =
459 (src_px[channels - 1] * 65535.0 + 0.5).clamp(0.0, 65535.0) as u16;
460 }
461 } else {
462 for (s, d) in src.iter().zip(dst.iter_mut()) {
463 *d = (self.from_linear(*s) * 65535.0 + 0.5).clamp(0.0, 65535.0) as u16;
464 }
465 }
466 };
467
468 if unpremul {
469 unpremultiply_to_scratch(src, |s| encode(s, dst));
470 } else {
471 encode(src, dst);
472 }
473 }
474
475 fn u8_to_linear_i12(&self, src: &[u8], dst: &mut [i16], _luts: &()) {
476 color::srgb_u8_to_linear_i12_row(src, dst);
477 }
478
479 fn linear_i12_to_u8(&self, src: &[i16], dst: &mut [u8], _luts: &()) {
480 color::linear_i12_to_srgb_u8_row(src, dst);
481 }
482
483 fn f32_to_linear_inplace(
484 &self,
485 row: &mut [f32],
486 channels: usize,
487 has_alpha: bool,
488 premul: bool,
489 ) {
490 simd::srgb_to_linear_row(row, channels, has_alpha);
491 if premul {
492 simd::premultiply_alpha_row(row);
493 }
494 }
495
496 fn linear_to_f32_inplace(
497 &self,
498 row: &mut [f32],
499 channels: usize,
500 has_alpha: bool,
501 unpremul: bool,
502 ) {
503 if unpremul {
504 simd::unpremultiply_alpha_row(row);
505 }
506 simd::srgb_from_linear_row(row, channels, has_alpha);
507 }
508}
509
/// The BT.709 transfer curve, delegating scalar math to `fastmath::bt709_*`
/// and in-place row conversion to `simd::bt709_*_row`.
#[derive(Debug, Clone, Copy, Default)]
pub struct Bt709;
521
522impl TransferCurve for Bt709 {
525 type Luts = ();
526
527 #[inline]
528 fn to_linear(&self, v: f32) -> f32 {
529 fastmath::bt709_to_linear(v)
530 }
531
532 #[inline]
533 fn from_linear(&self, v: f32) -> f32 {
534 fastmath::bt709_from_linear(v)
535 }
536
537 fn build_luts(&self) -> Self::Luts {}
538
539 fn u8_to_linear_f32(
540 &self,
541 src: &[u8],
542 dst: &mut [f32],
543 _luts: &(),
544 channels: usize,
545 has_alpha: bool,
546 premul: bool,
547 ) {
548 if has_alpha && channels >= 2 {
549 for (src_px, dst_px) in src
550 .chunks_exact(channels)
551 .zip(dst.chunks_exact_mut(channels))
552 {
553 for i in 0..channels - 1 {
554 dst_px[i] = self.to_linear(src_px[i] as f32 / 255.0);
555 }
556 dst_px[channels - 1] = src_px[channels - 1] as f32 / 255.0;
557 }
558 } else {
559 for (s, d) in src.iter().zip(dst.iter_mut()) {
560 *d = self.to_linear(*s as f32 / 255.0);
561 }
562 }
563 if premul {
564 simd::premultiply_alpha_row(dst);
565 }
566 }
567
568 fn linear_f32_to_u8(
569 &self,
570 src: &[f32],
571 dst: &mut [u8],
572 _luts: &(),
573 channels: usize,
574 has_alpha: bool,
575 unpremul: bool,
576 ) {
577 let encode = |src: &[f32], dst: &mut [u8]| {
578 if has_alpha && channels >= 2 {
579 for (src_px, dst_px) in src
580 .chunks_exact(channels)
581 .zip(dst.chunks_exact_mut(channels))
582 {
583 for i in 0..channels - 1 {
584 dst_px[i] =
585 (self.from_linear(src_px[i]) * 255.0 + 0.5).clamp(0.0, 255.0) as u8;
586 }
587 dst_px[channels - 1] =
588 (src_px[channels - 1] * 255.0 + 0.5).clamp(0.0, 255.0) as u8;
589 }
590 } else {
591 for (s, d) in src.iter().zip(dst.iter_mut()) {
592 *d = (self.from_linear(*s) * 255.0 + 0.5).clamp(0.0, 255.0) as u8;
593 }
594 }
595 };
596
597 if unpremul {
598 unpremultiply_to_scratch(src, |s| encode(s, dst));
599 } else {
600 encode(src, dst);
601 }
602 }
603
604 fn u16_to_linear_f32(
605 &self,
606 src: &[u16],
607 dst: &mut [f32],
608 _luts: &(),
609 channels: usize,
610 has_alpha: bool,
611 premul: bool,
612 ) {
613 if has_alpha && channels >= 2 {
614 for (src_px, dst_px) in src
615 .chunks_exact(channels)
616 .zip(dst.chunks_exact_mut(channels))
617 {
618 for i in 0..channels - 1 {
619 dst_px[i] = self.to_linear(src_px[i] as f32 / 65535.0);
620 }
621 dst_px[channels - 1] = src_px[channels - 1] as f32 / 65535.0;
622 }
623 } else {
624 for (s, d) in src.iter().zip(dst.iter_mut()) {
625 *d = self.to_linear(*s as f32 / 65535.0);
626 }
627 }
628 if premul {
629 simd::premultiply_alpha_row(dst);
630 }
631 }
632
633 fn linear_f32_to_u16(
634 &self,
635 src: &[f32],
636 dst: &mut [u16],
637 _luts: &(),
638 channels: usize,
639 has_alpha: bool,
640 unpremul: bool,
641 ) {
642 let encode = |src: &[f32], dst: &mut [u16]| {
643 if has_alpha && channels >= 2 {
644 for (src_px, dst_px) in src
645 .chunks_exact(channels)
646 .zip(dst.chunks_exact_mut(channels))
647 {
648 for i in 0..channels - 1 {
649 dst_px[i] = (self.from_linear(src_px[i]) * 65535.0 + 0.5)
650 .clamp(0.0, 65535.0) as u16;
651 }
652 dst_px[channels - 1] =
653 (src_px[channels - 1] * 65535.0 + 0.5).clamp(0.0, 65535.0) as u16;
654 }
655 } else {
656 for (s, d) in src.iter().zip(dst.iter_mut()) {
657 *d = (self.from_linear(*s) * 65535.0 + 0.5).clamp(0.0, 65535.0) as u16;
658 }
659 }
660 };
661
662 if unpremul {
663 unpremultiply_to_scratch(src, |s| encode(s, dst));
664 } else {
665 encode(src, dst);
666 }
667 }
668
669 fn u8_to_linear_i12(&self, src: &[u8], dst: &mut [i16], _luts: &()) {
670 for (s, d) in src.iter().zip(dst.iter_mut()) {
671 let linear = self.to_linear(*s as f32 / 255.0);
672 *d = (linear * 4095.0 + 0.5).clamp(0.0, 4095.0) as i16;
673 }
674 }
675
676 fn linear_i12_to_u8(&self, src: &[i16], dst: &mut [u8], _luts: &()) {
677 for (s, d) in src.iter().zip(dst.iter_mut()) {
678 let linear = (*s).clamp(0, 4095) as f32 / 4095.0;
679 *d = (self.from_linear(linear) * 255.0 + 0.5).clamp(0.0, 255.0) as u8;
680 }
681 }
682
683 fn f32_to_linear_inplace(
684 &self,
685 row: &mut [f32],
686 channels: usize,
687 has_alpha: bool,
688 premul: bool,
689 ) {
690 simd::bt709_to_linear_row(row, channels, has_alpha);
691 if premul {
692 simd::premultiply_alpha_row(row);
693 }
694 }
695
696 fn linear_to_f32_inplace(
697 &self,
698 row: &mut [f32],
699 channels: usize,
700 has_alpha: bool,
701 unpremul: bool,
702 ) {
703 if unpremul {
704 simd::unpremultiply_alpha_row(row);
705 }
706 simd::bt709_from_linear_row(row, channels, has_alpha);
707 }
708}
709
/// The PQ (perceptual quantizer) transfer curve, delegating scalar math to
/// `fastmath::pq_*` and in-place row conversion to `simd::pq_*_row`.
#[derive(Debug, Clone, Copy, Default)]
pub struct Pq;
722
723impl TransferCurve for Pq {
726 type Luts = ();
727
728 #[inline]
729 fn to_linear(&self, v: f32) -> f32 {
730 fastmath::pq_to_linear(v)
731 }
732
733 #[inline]
734 fn from_linear(&self, v: f32) -> f32 {
735 fastmath::pq_from_linear(v)
736 }
737
738 fn build_luts(&self) -> Self::Luts {}
739
740 fn u8_to_linear_f32(
741 &self,
742 src: &[u8],
743 dst: &mut [f32],
744 _luts: &(),
745 channels: usize,
746 has_alpha: bool,
747 premul: bool,
748 ) {
749 if has_alpha && channels >= 2 {
750 for (src_px, dst_px) in src
751 .chunks_exact(channels)
752 .zip(dst.chunks_exact_mut(channels))
753 {
754 for i in 0..channels - 1 {
755 dst_px[i] = self.to_linear(src_px[i] as f32 / 255.0);
756 }
757 dst_px[channels - 1] = src_px[channels - 1] as f32 / 255.0;
758 }
759 } else {
760 for (s, d) in src.iter().zip(dst.iter_mut()) {
761 *d = self.to_linear(*s as f32 / 255.0);
762 }
763 }
764 if premul {
765 simd::premultiply_alpha_row(dst);
766 }
767 }
768
769 fn linear_f32_to_u8(
770 &self,
771 src: &[f32],
772 dst: &mut [u8],
773 _luts: &(),
774 channels: usize,
775 has_alpha: bool,
776 unpremul: bool,
777 ) {
778 let encode = |src: &[f32], dst: &mut [u8]| {
779 if has_alpha && channels >= 2 {
780 for (src_px, dst_px) in src
781 .chunks_exact(channels)
782 .zip(dst.chunks_exact_mut(channels))
783 {
784 for i in 0..channels - 1 {
785 dst_px[i] =
786 (self.from_linear(src_px[i]) * 255.0 + 0.5).clamp(0.0, 255.0) as u8;
787 }
788 dst_px[channels - 1] =
789 (src_px[channels - 1] * 255.0 + 0.5).clamp(0.0, 255.0) as u8;
790 }
791 } else {
792 for (s, d) in src.iter().zip(dst.iter_mut()) {
793 *d = (self.from_linear(*s) * 255.0 + 0.5).clamp(0.0, 255.0) as u8;
794 }
795 }
796 };
797
798 if unpremul {
799 unpremultiply_to_scratch(src, |s| encode(s, dst));
800 } else {
801 encode(src, dst);
802 }
803 }
804
805 fn u16_to_linear_f32(
806 &self,
807 src: &[u16],
808 dst: &mut [f32],
809 _luts: &(),
810 channels: usize,
811 has_alpha: bool,
812 premul: bool,
813 ) {
814 if has_alpha && channels >= 2 {
815 for (src_px, dst_px) in src
816 .chunks_exact(channels)
817 .zip(dst.chunks_exact_mut(channels))
818 {
819 for i in 0..channels - 1 {
820 dst_px[i] = self.to_linear(src_px[i] as f32 / 65535.0);
821 }
822 dst_px[channels - 1] = src_px[channels - 1] as f32 / 65535.0;
823 }
824 } else {
825 for (s, d) in src.iter().zip(dst.iter_mut()) {
826 *d = self.to_linear(*s as f32 / 65535.0);
827 }
828 }
829 if premul {
830 simd::premultiply_alpha_row(dst);
831 }
832 }
833
834 fn linear_f32_to_u16(
835 &self,
836 src: &[f32],
837 dst: &mut [u16],
838 _luts: &(),
839 channels: usize,
840 has_alpha: bool,
841 unpremul: bool,
842 ) {
843 let encode = |src: &[f32], dst: &mut [u16]| {
844 if has_alpha && channels >= 2 {
845 for (src_px, dst_px) in src
846 .chunks_exact(channels)
847 .zip(dst.chunks_exact_mut(channels))
848 {
849 for i in 0..channels - 1 {
850 dst_px[i] = (self.from_linear(src_px[i]) * 65535.0 + 0.5)
851 .clamp(0.0, 65535.0) as u16;
852 }
853 dst_px[channels - 1] =
854 (src_px[channels - 1] * 65535.0 + 0.5).clamp(0.0, 65535.0) as u16;
855 }
856 } else {
857 for (s, d) in src.iter().zip(dst.iter_mut()) {
858 *d = (self.from_linear(*s) * 65535.0 + 0.5).clamp(0.0, 65535.0) as u16;
859 }
860 }
861 };
862
863 if unpremul {
864 unpremultiply_to_scratch(src, |s| encode(s, dst));
865 } else {
866 encode(src, dst);
867 }
868 }
869
870 fn u8_to_linear_i12(&self, src: &[u8], dst: &mut [i16], _luts: &()) {
871 for (s, d) in src.iter().zip(dst.iter_mut()) {
872 let linear = self.to_linear(*s as f32 / 255.0);
873 *d = (linear * 4095.0 + 0.5).clamp(0.0, 4095.0) as i16;
874 }
875 }
876
877 fn linear_i12_to_u8(&self, src: &[i16], dst: &mut [u8], _luts: &()) {
878 for (s, d) in src.iter().zip(dst.iter_mut()) {
879 let linear = (*s).clamp(0, 4095) as f32 / 4095.0;
880 *d = (self.from_linear(linear) * 255.0 + 0.5).clamp(0.0, 255.0) as u8;
881 }
882 }
883
884 fn f32_to_linear_inplace(
885 &self,
886 row: &mut [f32],
887 channels: usize,
888 has_alpha: bool,
889 premul: bool,
890 ) {
891 simd::pq_to_linear_row(row, channels, has_alpha);
892 if premul {
893 simd::premultiply_alpha_row(row);
894 }
895 }
896
897 fn linear_to_f32_inplace(
898 &self,
899 row: &mut [f32],
900 channels: usize,
901 has_alpha: bool,
902 unpremul: bool,
903 ) {
904 if unpremul {
905 simd::unpremultiply_alpha_row(row);
906 }
907 simd::pq_from_linear_row(row, channels, has_alpha);
908 }
909}
910
/// The HLG (hybrid log-gamma) transfer curve, delegating scalar math to
/// `fastmath::hlg_*` and in-place row conversion to `simd::hlg_*_row`.
#[derive(Debug, Clone, Copy, Default)]
pub struct Hlg;
922
923impl TransferCurve for Hlg {
926 type Luts = ();
927
928 #[inline]
929 fn to_linear(&self, v: f32) -> f32 {
930 fastmath::hlg_to_linear(v)
931 }
932
933 #[inline]
934 fn from_linear(&self, v: f32) -> f32 {
935 fastmath::hlg_from_linear(v)
936 }
937
938 fn build_luts(&self) -> Self::Luts {}
939
940 fn u8_to_linear_f32(
941 &self,
942 src: &[u8],
943 dst: &mut [f32],
944 _luts: &(),
945 channels: usize,
946 has_alpha: bool,
947 premul: bool,
948 ) {
949 if has_alpha && channels >= 2 {
950 for (src_px, dst_px) in src
951 .chunks_exact(channels)
952 .zip(dst.chunks_exact_mut(channels))
953 {
954 for i in 0..channels - 1 {
955 dst_px[i] = self.to_linear(src_px[i] as f32 / 255.0);
956 }
957 dst_px[channels - 1] = src_px[channels - 1] as f32 / 255.0;
958 }
959 } else {
960 for (s, d) in src.iter().zip(dst.iter_mut()) {
961 *d = self.to_linear(*s as f32 / 255.0);
962 }
963 }
964 if premul {
965 simd::premultiply_alpha_row(dst);
966 }
967 }
968
969 fn linear_f32_to_u8(
970 &self,
971 src: &[f32],
972 dst: &mut [u8],
973 _luts: &(),
974 channels: usize,
975 has_alpha: bool,
976 unpremul: bool,
977 ) {
978 let encode = |src: &[f32], dst: &mut [u8]| {
979 if has_alpha && channels >= 2 {
980 for (src_px, dst_px) in src
981 .chunks_exact(channels)
982 .zip(dst.chunks_exact_mut(channels))
983 {
984 for i in 0..channels - 1 {
985 dst_px[i] =
986 (self.from_linear(src_px[i]) * 255.0 + 0.5).clamp(0.0, 255.0) as u8;
987 }
988 dst_px[channels - 1] =
989 (src_px[channels - 1] * 255.0 + 0.5).clamp(0.0, 255.0) as u8;
990 }
991 } else {
992 for (s, d) in src.iter().zip(dst.iter_mut()) {
993 *d = (self.from_linear(*s) * 255.0 + 0.5).clamp(0.0, 255.0) as u8;
994 }
995 }
996 };
997
998 if unpremul {
999 unpremultiply_to_scratch(src, |s| encode(s, dst));
1000 } else {
1001 encode(src, dst);
1002 }
1003 }
1004
1005 fn u16_to_linear_f32(
1006 &self,
1007 src: &[u16],
1008 dst: &mut [f32],
1009 _luts: &(),
1010 channels: usize,
1011 has_alpha: bool,
1012 premul: bool,
1013 ) {
1014 if has_alpha && channels >= 2 {
1015 for (src_px, dst_px) in src
1016 .chunks_exact(channels)
1017 .zip(dst.chunks_exact_mut(channels))
1018 {
1019 for i in 0..channels - 1 {
1020 dst_px[i] = self.to_linear(src_px[i] as f32 / 65535.0);
1021 }
1022 dst_px[channels - 1] = src_px[channels - 1] as f32 / 65535.0;
1023 }
1024 } else {
1025 for (s, d) in src.iter().zip(dst.iter_mut()) {
1026 *d = self.to_linear(*s as f32 / 65535.0);
1027 }
1028 }
1029 if premul {
1030 simd::premultiply_alpha_row(dst);
1031 }
1032 }
1033
1034 fn linear_f32_to_u16(
1035 &self,
1036 src: &[f32],
1037 dst: &mut [u16],
1038 _luts: &(),
1039 channels: usize,
1040 has_alpha: bool,
1041 unpremul: bool,
1042 ) {
1043 let encode = |src: &[f32], dst: &mut [u16]| {
1044 if has_alpha && channels >= 2 {
1045 for (src_px, dst_px) in src
1046 .chunks_exact(channels)
1047 .zip(dst.chunks_exact_mut(channels))
1048 {
1049 for i in 0..channels - 1 {
1050 dst_px[i] = (self.from_linear(src_px[i]) * 65535.0 + 0.5)
1051 .clamp(0.0, 65535.0) as u16;
1052 }
1053 dst_px[channels - 1] =
1054 (src_px[channels - 1] * 65535.0 + 0.5).clamp(0.0, 65535.0) as u16;
1055 }
1056 } else {
1057 for (s, d) in src.iter().zip(dst.iter_mut()) {
1058 *d = (self.from_linear(*s) * 65535.0 + 0.5).clamp(0.0, 65535.0) as u16;
1059 }
1060 }
1061 };
1062
1063 if unpremul {
1064 unpremultiply_to_scratch(src, |s| encode(s, dst));
1065 } else {
1066 encode(src, dst);
1067 }
1068 }
1069
1070 fn u8_to_linear_i12(&self, src: &[u8], dst: &mut [i16], _luts: &()) {
1071 for (s, d) in src.iter().zip(dst.iter_mut()) {
1072 let linear = self.to_linear(*s as f32 / 255.0);
1073 *d = (linear * 4095.0 + 0.5).clamp(0.0, 4095.0) as i16;
1074 }
1075 }
1076
1077 fn linear_i12_to_u8(&self, src: &[i16], dst: &mut [u8], _luts: &()) {
1078 for (s, d) in src.iter().zip(dst.iter_mut()) {
1079 let linear = (*s).clamp(0, 4095) as f32 / 4095.0;
1080 *d = (self.from_linear(linear) * 255.0 + 0.5).clamp(0.0, 255.0) as u8;
1081 }
1082 }
1083
1084 fn f32_to_linear_inplace(
1085 &self,
1086 row: &mut [f32],
1087 channels: usize,
1088 has_alpha: bool,
1089 premul: bool,
1090 ) {
1091 simd::hlg_to_linear_row(row, channels, has_alpha);
1092 if premul {
1093 simd::premultiply_alpha_row(row);
1094 }
1095 }
1096
1097 fn linear_to_f32_inplace(
1098 &self,
1099 row: &mut [f32],
1100 channels: usize,
1101 has_alpha: bool,
1102 unpremul: bool,
1103 ) {
1104 if unpremul {
1105 simd::unpremultiply_alpha_row(row);
1106 }
1107 simd::hlg_from_linear_row(row, channels, has_alpha);
1108 }
1109}
1110
#[cfg(test)]
mod tests {
    use super::*;
    #[cfg(not(feature = "std"))]
    use alloc::{vec, vec::Vec};

    // Identity curve must round-trip u8 exactly (no curve, no quantization loss).
    #[test]
    fn no_transfer_roundtrip_u8() {
        let tf = NoTransfer;
        tf.build_luts();
        let src: Vec<u8> = (0..=255).collect();
        let mut f32_buf = vec![0.0f32; 256];
        let mut out = vec![0u8; 256];

        tf.u8_to_linear_f32(&src, &mut f32_buf, &(), 1, false, false);
        tf.linear_f32_to_u8(&f32_buf, &mut out, &(), 1, false, false);

        for i in 0..256 {
            assert_eq!(src[i], out[i], "NoTransfer roundtrip mismatch at {}", i);
        }
    }

    // sRGB decode/encode may lose at most 1 LSB across the full u8 range.
    #[test]
    fn srgb_roundtrip_u8() {
        let tf = Srgb;
        tf.build_luts();
        let src: Vec<u8> = (0..=255).collect();
        let mut f32_buf = vec![0.0f32; 256];
        let mut out = vec![0u8; 256];

        tf.u8_to_linear_f32(&src, &mut f32_buf, &(), 1, false, false);
        tf.linear_f32_to_u8(&f32_buf, &mut out, &(), 1, false, false);

        for i in 0..256 {
            let diff = (src[i] as i16 - out[i] as i16).unsigned_abs();
            assert!(diff <= 1, "sRGB roundtrip off by {} at {}", diff, i);
        }
    }

    #[test]
    fn srgb_roundtrip_u16() {
        let tf = Srgb;
        tf.build_luts();

        // Sample every 257th code (0, 257, ..., 65535) so the test stays fast.
        let values: Vec<u16> = (0..=65535).step_by(257).collect();
        let mut f32_buf = vec![0.0f32; values.len()];
        let mut out = vec![0u16; values.len()];

        tf.u16_to_linear_f32(&values, &mut f32_buf, &(), 1, false, false);
        tf.linear_f32_to_u16(&f32_buf, &mut out, &(), 1, false, false);

        for i in 0..values.len() {
            let diff = (values[i] as i32 - out[i] as i32).unsigned_abs();
            assert!(
                diff <= 1,
                "sRGB u16 roundtrip off by {} at value {}: {} -> {} -> {}",
                diff,
                values[i],
                values[i],
                f32_buf[i],
                out[i]
            );
        }
    }

    // The trait's i12 path must agree exactly with the direct LUT row helper.
    #[test]
    fn srgb_i12_matches_existing() {
        let tf = Srgb;
        tf.build_luts();

        let src: Vec<u8> = (0..=255).collect();
        let mut via_tf = vec![0i16; 256];
        let mut via_direct = vec![0i16; 256];

        tf.u8_to_linear_i12(&src, &mut via_tf, &());
        crate::color::srgb_u8_to_linear_i12_row(&src, &mut via_direct);

        assert_eq!(via_tf, via_direct, "TF i12 path should match direct LUT");
    }

    // Scalar `to_linear` and the LUT-based row decode must agree closely.
    #[test]
    fn srgb_scalar_matches_lut() {
        let tf = Srgb;
        for i in 0..=255u8 {
            let from_scalar = tf.to_linear(i as f32 / 255.0);
            let mut f32_buf = [0.0f32];
            crate::color::srgb_u8_to_linear_f32(&[i], &mut f32_buf, 1, false);

            let diff = (from_scalar - f32_buf[0]).abs();
            assert!(
                diff < 1e-5,
                "sRGB scalar vs LUT mismatch at {}: scalar={}, lut={}",
                i,
                from_scalar,
                f32_buf[0]
            );
        }
    }

    #[test]
    fn no_transfer_identity() {
        let tf = NoTransfer;
        assert!(tf.is_identity());
        assert_eq!(tf.to_linear(0.5), 0.5);
        assert_eq!(tf.from_linear(0.5), 0.5);
    }

    #[test]
    fn srgb_not_identity() {
        let tf = Srgb;
        assert!(!tf.is_identity());
        let linear = tf.to_linear(0.5);
        assert!(
            (linear - 0.214).abs() < 0.01,
            "sRGB 0.5 → linear = {} (expected ~0.214)",
            linear
        );
    }

    // Alpha must be rescaled linearly while color channels go through the curve.
    #[test]
    fn srgb_u8_to_f32_with_alpha() {
        let tf = Srgb;
        tf.build_luts();

        let src = [128u8, 64, 32, 200];
        let mut dst = [0.0f32; 4];

        tf.u8_to_linear_f32(&src, &mut dst, &(), 4, true, false);

        assert!(dst[0] > 0.2 && dst[0] < 0.3, "R linear: {}", dst[0]);
        assert!((dst[3] - 200.0 / 255.0).abs() < 0.01, "A: {}", dst[3]);
    }

    // Premultiply on decode + un-premultiply on encode must round-trip.
    #[test]
    fn srgb_u8_premul_unpremul_roundtrip() {
        let tf = Srgb;
        tf.build_luts();

        let src = [128u8, 64, 32, 200];
        let mut f32_buf = [0.0f32; 4];
        let mut out = [0u8; 4];

        tf.u8_to_linear_f32(&src, &mut f32_buf, &(), 4, true, true);
        tf.linear_f32_to_u8(&f32_buf, &mut out, &(), 4, true, true);

        for i in 0..4 {
            let diff = (src[i] as i16 - out[i] as i16).unsigned_abs();
            assert!(
                diff <= 1,
                "Premul roundtrip off by {} at channel {}: {} vs {}",
                diff,
                i,
                src[i],
                out[i]
            );
        }
    }

    #[test]
    fn no_alpha_3ch_roundtrip() {
        let tf = Srgb;
        tf.build_luts();

        // Two RGB pixels, no alpha channel.
        let src = [128u8, 64, 32, 200, 100, 50];
        let mut f32_buf = [0.0f32; 6];
        let mut out = [0u8; 6];

        tf.u8_to_linear_f32(&src, &mut f32_buf, &(), 3, false, false);
        tf.linear_f32_to_u8(&f32_buf, &mut out, &(), 3, false, false);

        for i in 0..6 {
            let diff = (src[i] as i16 - out[i] as i16).unsigned_abs();
            assert!(
                diff <= 1,
                "3ch roundtrip off by {} at {}: {} vs {}",
                diff,
                i,
                src[i],
                out[i]
            );
        }
    }

    #[test]
    fn bt709_roundtrip_u8() {
        let tf = Bt709;
        tf.build_luts();
        let src: Vec<u8> = (0..=255).collect();
        let mut f32_buf = vec![0.0f32; 256];
        let mut out = vec![0u8; 256];

        tf.u8_to_linear_f32(&src, &mut f32_buf, &(), 1, false, false);
        tf.linear_f32_to_u8(&f32_buf, &mut out, &(), 1, false, false);

        for i in 0..256 {
            let diff = (src[i] as i16 - out[i] as i16).unsigned_abs();
            assert!(diff <= 1, "BT.709 roundtrip off by {} at {}", diff, i);
        }
    }

    #[test]
    fn bt709_monotonic() {
        let tf = Bt709;
        let mut prev = 0.0f32;
        for i in 0..=255u8 {
            let linear = tf.to_linear(i as f32 / 255.0);
            assert!(
                linear >= prev,
                "BT.709 to_linear not monotonic at {}: {} < {}",
                i,
                linear,
                prev
            );
            prev = linear;
        }
    }

    #[test]
    fn bt709_endpoints() {
        let tf = Bt709;
        assert!((tf.to_linear(0.0)).abs() < 1e-7);
        assert!((tf.to_linear(1.0) - 1.0).abs() < 1e-5);
        assert!((tf.from_linear(0.0)).abs() < 1e-7);
        assert!((tf.from_linear(1.0) - 1.0).abs() < 1e-5);
    }

    #[test]
    fn pq_roundtrip_u8() {
        let tf = Pq;
        tf.build_luts();
        let src: Vec<u8> = (0..=255).collect();
        let mut f32_buf = vec![0.0f32; 256];
        let mut out = vec![0u8; 256];

        tf.u8_to_linear_f32(&src, &mut f32_buf, &(), 1, false, false);
        tf.linear_f32_to_u8(&f32_buf, &mut out, &(), 1, false, false);

        for i in 0..256 {
            let diff = (src[i] as i16 - out[i] as i16).unsigned_abs();
            assert!(diff <= 1, "PQ roundtrip off by {} at {}", diff, i);
        }
    }

    #[test]
    fn pq_monotonic() {
        let tf = Pq;
        let mut prev = 0.0f32;
        for i in 0..=255u8 {
            let linear = tf.to_linear(i as f32 / 255.0);
            assert!(
                linear >= prev,
                "PQ to_linear not monotonic at {}: {} < {}",
                i,
                linear,
                prev
            );
            prev = linear;
        }
    }

    #[test]
    fn pq_endpoints() {
        let tf = Pq;
        assert!((tf.to_linear(0.0)).abs() < 1e-7);
        assert!((tf.to_linear(1.0) - 1.0).abs() < 1e-4);
        assert!((tf.from_linear(0.0)).abs() < 1e-7);
        assert!((tf.from_linear(1.0) - 1.0).abs() < 1e-4);
    }

    #[test]
    fn hlg_roundtrip_u8() {
        let tf = Hlg;
        tf.build_luts();
        let src: Vec<u8> = (0..=255).collect();
        let mut f32_buf = vec![0.0f32; 256];
        let mut out = vec![0u8; 256];

        tf.u8_to_linear_f32(&src, &mut f32_buf, &(), 1, false, false);
        tf.linear_f32_to_u8(&f32_buf, &mut out, &(), 1, false, false);

        for i in 0..256 {
            let diff = (src[i] as i16 - out[i] as i16).unsigned_abs();
            assert!(diff <= 1, "HLG roundtrip off by {} at {}", diff, i);
        }
    }

    #[test]
    fn hlg_monotonic() {
        let tf = Hlg;
        let mut prev = 0.0f32;
        for i in 0..=255u8 {
            let linear = tf.to_linear(i as f32 / 255.0);
            assert!(
                linear >= prev,
                "HLG to_linear not monotonic at {}: {} < {}",
                i,
                linear,
                prev
            );
            prev = linear;
        }
    }

    // HLG(1.0) is not pinned to 1.0 here; only check decode/encode consistency.
    #[test]
    fn hlg_endpoints() {
        let tf = Hlg;
        assert!((tf.to_linear(0.0)).abs() < 1e-7);
        let at_one = tf.to_linear(1.0);
        assert!(at_one > 0.0, "HLG(1.0) should be positive: {}", at_one);
        let back = tf.from_linear(at_one);
        assert!(
            (back - 1.0).abs() < 1e-5,
            "HLG roundtrip at 1.0: {} -> {} -> {}",
            1.0,
            at_one,
            back
        );
    }

    #[test]
    fn cicp_transfer_known_codes() {
        use zenpixels::TransferFunction;
        assert_eq!(
            TransferFunction::from_cicp(1),
            Some(TransferFunction::Bt709)
        );
        assert_eq!(TransferFunction::from_cicp(6), None);
        assert_eq!(
            TransferFunction::from_cicp(8),
            Some(TransferFunction::Linear)
        );
        assert_eq!(
            TransferFunction::from_cicp(13),
            Some(TransferFunction::Srgb)
        );
        assert_eq!(TransferFunction::from_cicp(16), Some(TransferFunction::Pq));
        assert_eq!(TransferFunction::from_cicp(18), Some(TransferFunction::Hlg));
    }

    #[test]
    fn cicp_transfer_unknown_codes() {
        use zenpixels::TransferFunction;
        assert_eq!(TransferFunction::from_cicp(0), None);
        assert_eq!(TransferFunction::from_cicp(2), None);
        assert_eq!(TransferFunction::from_cicp(255), None);
    }
}
1483}