use crate::pic_scale_error::PicScaleError;
use crate::scaler::{Scaling, ScalingF32};
use crate::support::check_image_size_overflow;
use crate::{
    ImageStore, ImageStoreMut, ImageStoreScaling, ResamplingFunction, Scaler, ScalingOptions,
    ScalingU16, ThreadingPolicy,
};
use colorutils_rs::TransferFunction;

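/// Scaler that resamples in linear light.
///
/// Source pixels are decoded with the configured [`TransferFunction`] into linear
/// values, resampled as `f32`, and then encoded back with the same transfer function.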
#[derive(Debug, Copy, Clone)]
pub struct LinearScaler {
    pub(crate) scaler: Scaler,
    pub(crate) transfer_function: TransferFunction,
}

impl LinearScaler {
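    /// Creates a new `LinearScaler` with the given resampling filter and an
    /// sRGB transfer function.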
    pub fn new(filter: ResamplingFunction) -> Self {
        LinearScaler {
            scaler: Scaler::new(filter),
            transfer_function: TransferFunction::Srgb,
        }
    }

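    /// Creates a new `LinearScaler` with the given resampling filter and an
    /// explicit transfer function.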
    pub fn new_with_transfer(
        filter: ResamplingFunction,
        transfer_function: TransferFunction,
    ) -> Self {
        LinearScaler {
            scaler: Scaler::new(filter),
            transfer_function,
        }
    }
}

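/// Lookup tables for the 8-bit path: a 256-entry gamma-to-linear table and a
/// 65536-entry linear-to-gamma table.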
struct Linearization {
    linearization: Box<[f32; 256]>,
    gamma: Box<[u8; 65536]>,
}

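/// Lookup tables for the 16-bit path: a 65536-entry gamma-to-linear table and
/// an 18-bit (262144-entry) linear-to-gamma table.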
struct Linearization16 {
    linearization: Box<[f32; 65536]>,
    gamma: Box<[u16; 262144]>,
}

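/// Builds the 16-bit lookup tables for `transfer_function` at the given bit depth.
///
/// Bit depths below 8 are rejected with [`PicScaleError::UnsupportedBitDepth`].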
fn make_linearization16(
    transfer_function: TransferFunction,
    bit_depth: usize,
) -> Result<Linearization16, PicScaleError> {
    if bit_depth < 8 {
        return Err(PicScaleError::UnsupportedBitDepth(bit_depth));
    }
    let mut linearizing = Box::new([0f32; 65536]);
    let max_lin_depth = (1u32 << bit_depth) - 1;
    let keep_max = 1u32 << bit_depth;
    let mut gamma = Box::new([0u16; 262144]);

    // Gamma-encoded value -> linear, only for codes representable at `bit_depth`.
    for (i, dst) in linearizing.iter_mut().take(keep_max as usize).enumerate() {
        *dst = transfer_function.linearize(i as f32 / max_lin_depth as f32);
    }

    // Linear value quantized to 18 bits -> gamma-encoded value at `bit_depth`.
    for (i, dst) in gamma.iter_mut().enumerate() {
        *dst = (transfer_function.gamma(i as f32 / 262143.) * max_lin_depth as f32)
            .round()
            .min(max_lin_depth as f32) as u16;
    }

    Ok(Linearization16 {
        linearization: linearizing,
        gamma,
    })
}

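/// Builds the 8-bit lookup tables for `transfer_function`.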
fn make_linearization(transfer_function: TransferFunction) -> Linearization {
    let mut linearizing = Box::new([0f32; 256]);
    let mut gamma = Box::new([0u8; 65536]);

    // 8-bit gamma-encoded value -> linear.
    for (i, dst) in linearizing.iter_mut().enumerate() {
        *dst = transfer_function.linearize(i as f32 / 255.);
    }

    // Linear value quantized to 16 bits -> 8-bit gamma-encoded value.
    for (i, dst) in gamma.iter_mut().enumerate() {
        *dst = (transfer_function.gamma(i as f32 / 65535.) * 255.)
            .round()
            .min(255.) as u8;
    }

    Linearization {
        linearization: linearizing,
        gamma,
    }
}

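/// Common 8-bit path for channel layouts without alpha: linearize, resample as
/// `f32`, then encode back to gamma.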
fn resize_typical8<'a, const CN: usize>(
    resampling_function: ResamplingFunction,
    transfer_function: TransferFunction,
    threading_policy: ThreadingPolicy,
    store: &ImageStore<'a, u8, CN>,
    into: &mut ImageStoreMut<'a, u8, CN>,
) -> Result<(), PicScaleError>
where
    ImageStore<'a, f32, CN>: ImageStoreScaling<'a, f32, CN>,
{
    let new_size = into.get_size();
    into.validate()?;
    store.validate()?;
    if store.width == 0 || store.height == 0 || new_size.width == 0 || new_size.height == 0 {
        return Err(PicScaleError::ZeroImageDimensions);
    }

    if check_image_size_overflow(store.width, store.height, store.channels) {
        return Err(PicScaleError::SourceImageIsTooLarge);
    }

    if check_image_size_overflow(new_size.width, new_size.height, store.channels) {
        return Err(PicScaleError::DestinationImageIsTooLarge);
    }

    if store.width == new_size.width && store.height == new_size.height {
        store.copied_to_mut(into);
        return Ok(());
    }

    let mut target_vertical = vec![f32::default(); store.width * store.height * CN];

    let mut linear_store =
        ImageStoreMut::<f32, CN>::from_slice(&mut target_vertical, store.width, store.height)?;

    let linearization = make_linearization(transfer_function);

    // Linearize the source into the f32 working buffer.
    for (&src, dst) in store
        .as_bytes()
        .iter()
        .zip(linear_store.buffer.borrow_mut())
    {
        *dst = linearization.linearization[src as usize];
    }

    let new_immutable_store = ImageStore::<f32, CN> {
        buffer: std::borrow::Cow::Owned(target_vertical),
        channels: CN,
        width: store.width,
        height: store.height,
        stride: store.width * CN,
        bit_depth: 12,
    };

    let mut new_store = ImageStoreMut::<f32, CN>::alloc_with_depth(into.width, into.height, 12);

    new_immutable_store.scale(
        &mut new_store,
        ScalingOptions {
            resampling_function,
            threading_policy,
            ..Default::default()
        },
    )?;

    // Encode the resampled linear values back to 8-bit gamma.
    for (&src, dst) in new_store.as_bytes().iter().zip(into.buffer.borrow_mut()) {
        let v = (src * 65535.).round().min(65535.).max(0.) as u16;
        *dst = linearization.gamma[v as usize];
    }

    Ok(())
}

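/// Common 16-bit path for channel layouts without alpha: linearize at the
/// destination bit depth, resample as `f32`, then encode back to gamma.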
fn resize_typical16<'a, const CN: usize>(
    resampling_function: ResamplingFunction,
    transfer_function: TransferFunction,
    threading_policy: ThreadingPolicy,
    store: &ImageStore<'a, u16, CN>,
    into: &mut ImageStoreMut<'a, u16, CN>,
) -> Result<(), PicScaleError>
where
    ImageStore<'a, f32, CN>: ImageStoreScaling<'a, f32, CN>,
{
    let new_size = into.get_size();
    into.validate()?;
    store.validate()?;
    if store.width == 0 || store.height == 0 || new_size.width == 0 || new_size.height == 0 {
        return Err(PicScaleError::ZeroImageDimensions);
    }

    if check_image_size_overflow(store.width, store.height, store.channels) {
        return Err(PicScaleError::SourceImageIsTooLarge);
    }

    if check_image_size_overflow(new_size.width, new_size.height, store.channels) {
        return Err(PicScaleError::DestinationImageIsTooLarge);
    }

    if store.width == new_size.width && store.height == new_size.height {
        store.copied_to_mut(into);
        return Ok(());
    }

    let mut target_vertical = vec![f32::default(); store.width * store.height * CN];

    let mut linear_store =
        ImageStoreMut::<f32, CN>::from_slice(&mut target_vertical, store.width, store.height)?;

    let linearization = make_linearization16(transfer_function, into.bit_depth)?;

    // Linearize the source into the f32 working buffer.
    for (&src, dst) in store
        .as_bytes()
        .iter()
        .zip(linear_store.buffer.borrow_mut())
    {
        *dst = linearization.linearization[src as usize];
    }

    let new_immutable_store = ImageStore::<f32, CN> {
        buffer: std::borrow::Cow::Owned(target_vertical),
        channels: CN,
        width: store.width,
        height: store.height,
        stride: store.width * CN,
        bit_depth: 16,
    };

    let mut new_store = ImageStoreMut::<f32, CN>::alloc(into.width, into.height);

    new_immutable_store.scale(
        &mut new_store,
        ScalingOptions {
            resampling_function,
            threading_policy,
            ..Default::default()
        },
    )?;

    // Encode the resampled linear values back to gamma at the destination bit depth.
    for (&src, dst) in new_store.as_bytes().iter().zip(into.buffer.borrow_mut()) {
        let v = ((src * 262143.).round().max(0.) as u32).min(262143);
        *dst = linearization.gamma[v as usize];
    }

    Ok(())
}

impl Scaling for LinearScaler {
    fn set_threading_policy(&mut self, threading_policy: ThreadingPolicy) {
        self.scaler.threading_policy = threading_policy;
    }

    fn resize_plane<'a>(
        &'a self,
        store: &ImageStore<'a, u8, 1>,
        into: &mut ImageStoreMut<'a, u8, 1>,
    ) -> Result<(), PicScaleError> {
        resize_typical8(
            self.scaler.function,
            self.transfer_function,
            self.scaler.threading_policy,
            store,
            into,
        )
    }

    fn resize_cbcr8<'a>(
        &'a self,
        store: &ImageStore<'a, u8, 2>,
        into: &mut ImageStoreMut<'a, u8, 2>,
    ) -> Result<(), PicScaleError> {
        resize_typical8(
            self.scaler.function,
            self.transfer_function,
            self.scaler.threading_policy,
            store,
            into,
        )
    }

    fn resize_gray_alpha<'a>(
        &'a self,
        store: &ImageStore<'a, u8, 2>,
        into: &mut ImageStoreMut<'a, u8, 2>,
        premultiply_alpha: bool,
    ) -> Result<(), PicScaleError> {
        let new_size = into.get_size();
        into.validate()?;
        store.validate()?;
        if store.width == 0 || store.height == 0 || new_size.width == 0 || new_size.height == 0 {
            return Err(PicScaleError::ZeroImageDimensions);
        }

        if check_image_size_overflow(store.width, store.height, store.channels) {
            return Err(PicScaleError::SourceImageIsTooLarge);
        }

        if check_image_size_overflow(new_size.width, new_size.height, store.channels) {
            return Err(PicScaleError::DestinationImageIsTooLarge);
        }

        if store.width == new_size.width && store.height == new_size.height {
            store.copied_to_mut(into);
            return Ok(());
        }

        const CN: usize = 2;

        let mut target_vertical = vec![f32::default(); store.width * store.height * CN];

        let mut linear_store =
            ImageStoreMut::<f32, CN>::from_slice(&mut target_vertical, store.width, store.height)?;

        let linearization = make_linearization(self.transfer_function);

        // Linearize luma; alpha is only normalized to 0..=1.
        for (src, dst) in store
            .as_bytes()
            .chunks_exact(2)
            .zip(linear_store.buffer.borrow_mut().chunks_exact_mut(2))
        {
            dst[0] = linearization.linearization[src[0] as usize];
            dst[1] = src[1] as f32 / 255.;
        }

        let new_immutable_store = ImageStore::<f32, CN> {
            buffer: std::borrow::Cow::Owned(target_vertical),
            channels: CN,
            width: store.width,
            height: store.height,
            stride: store.width * CN,
            bit_depth: 12,
        };

        let mut new_store = ImageStoreMut::<f32, CN>::alloc(into.width, into.height);

        self.scaler.resize_gray_alpha_f32(
            &new_immutable_store,
            &mut new_store,
            premultiply_alpha,
        )?;

        // Encode luma back to gamma; rescale alpha to 8-bit.
        for (src, dst) in new_store
            .as_bytes()
            .chunks_exact(2)
            .zip(into.buffer.borrow_mut().chunks_exact_mut(2))
        {
            let v0 = (src[0] * 65535.).round().min(65535.).max(0.) as u16;

            dst[0] = linearization.gamma[v0 as usize];
            dst[1] = (src[1] * 255.).round().min(255.).max(0.) as u8;
        }

        Ok(())
    }

    fn resize_rgb<'a>(
        &'a self,
        store: &ImageStore<'a, u8, 3>,
        into: &mut ImageStoreMut<'a, u8, 3>,
    ) -> Result<(), PicScaleError> {
        resize_typical8(
            self.scaler.function,
            self.transfer_function,
            self.scaler.threading_policy,
            store,
            into,
        )
    }

    fn resize_rgba<'a>(
        &'a self,
        store: &ImageStore<'a, u8, 4>,
        into: &mut ImageStoreMut<'a, u8, 4>,
        premultiply_alpha: bool,
    ) -> Result<(), PicScaleError> {
        let new_size = into.get_size();
        into.validate()?;
        store.validate()?;
        if store.width == 0 || store.height == 0 || new_size.width == 0 || new_size.height == 0 {
            return Err(PicScaleError::ZeroImageDimensions);
        }

        if check_image_size_overflow(store.width, store.height, store.channels) {
            return Err(PicScaleError::SourceImageIsTooLarge);
        }

        if check_image_size_overflow(new_size.width, new_size.height, store.channels) {
            return Err(PicScaleError::DestinationImageIsTooLarge);
        }

        if store.width == new_size.width && store.height == new_size.height {
            store.copied_to_mut(into);
            return Ok(());
        }

        const CN: usize = 4;

        let mut target_vertical = vec![f32::default(); store.width * store.height * CN];

        let mut linear_store =
            ImageStoreMut::<f32, CN>::from_slice(&mut target_vertical, store.width, store.height)?;

        let linearization = make_linearization(self.transfer_function);

        // Linearize RGB; alpha is only normalized to 0..=1.
        for (src, dst) in store
            .as_bytes()
            .chunks_exact(4)
            .zip(linear_store.buffer.borrow_mut().chunks_exact_mut(4))
        {
            dst[0] = linearization.linearization[src[0] as usize];
            dst[1] = linearization.linearization[src[1] as usize];
            dst[2] = linearization.linearization[src[2] as usize];
            dst[3] = src[3] as f32 / 255.;
        }

        let new_immutable_store = ImageStore::<f32, CN> {
            buffer: std::borrow::Cow::Owned(target_vertical),
            channels: CN,
            width: store.width,
            height: store.height,
            stride: store.width * CN,
            bit_depth: 12,
        };

        let mut new_store = ImageStoreMut::<f32, CN>::alloc(into.width, into.height);

        self.scaler
            .resize_rgba_f32(&new_immutable_store, &mut new_store, premultiply_alpha)?;

        // Encode RGB back to gamma; rescale alpha to 8-bit.
        for (src, dst) in new_store
            .as_bytes()
            .chunks_exact(4)
            .zip(into.buffer.borrow_mut().chunks_exact_mut(4))
        {
            let v0 = (src[0] * 65535.).round().min(65535.).max(0.) as u16;
            let v1 = (src[1] * 65535.).round().min(65535.).max(0.) as u16;
            let v2 = (src[2] * 65535.).round().min(65535.).max(0.) as u16;

            dst[0] = linearization.gamma[v0 as usize];
            dst[1] = linearization.gamma[v1 as usize];
            dst[2] = linearization.gamma[v2 as usize];
            dst[3] = (src[3] * 255.).round().min(255.).max(0.) as u8;
        }

        Ok(())
    }
}

impl ScalingU16 for LinearScaler {
    fn resize_plane_u16<'a>(
        &'a self,
        store: &ImageStore<'a, u16, 1>,
        into: &mut ImageStoreMut<'a, u16, 1>,
    ) -> Result<(), PicScaleError> {
        resize_typical16(
            self.scaler.function,
            self.transfer_function,
            self.scaler.threading_policy,
            store,
            into,
        )
    }

    fn resize_cbcr_u16<'a>(
        &'a self,
        store: &ImageStore<'a, u16, 2>,
        into: &mut ImageStoreMut<'a, u16, 2>,
    ) -> Result<(), PicScaleError> {
        resize_typical16(
            self.scaler.function,
            self.transfer_function,
            self.scaler.threading_policy,
            store,
            into,
        )
    }

    fn resize_gray_alpha16<'a>(
        &'a self,
        store: &ImageStore<'a, u16, 2>,
        into: &mut ImageStoreMut<'a, u16, 2>,
        premultiply_alpha: bool,
    ) -> Result<(), PicScaleError> {
        let new_size = into.get_size();
        into.validate()?;
        store.validate()?;
        if store.width == 0 || store.height == 0 || new_size.width == 0 || new_size.height == 0 {
            return Err(PicScaleError::ZeroImageDimensions);
        }

        if check_image_size_overflow(store.width, store.height, store.channels) {
            return Err(PicScaleError::SourceImageIsTooLarge);
        }

        if check_image_size_overflow(new_size.width, new_size.height, store.channels) {
            return Err(PicScaleError::DestinationImageIsTooLarge);
        }

        if store.width == new_size.width && store.height == new_size.height {
            store.copied_to_mut(into);
            return Ok(());
        }

        const CN: usize = 2;

        let mut target_vertical = vec![f32::default(); store.width * store.height * CN];

        let mut linear_store =
            ImageStoreMut::<f32, CN>::from_slice(&mut target_vertical, store.width, store.height)?;

        let linearization = make_linearization16(self.transfer_function, into.bit_depth)?;

        let max_bit_depth_value = ((1u32 << into.bit_depth) - 1) as f32;

        let v_recip = 1. / max_bit_depth_value;

        // Linearize luma; alpha is only normalized to 0..=1.
        for (src, dst) in store
            .as_bytes()
            .chunks_exact(2)
            .zip(linear_store.buffer.borrow_mut().chunks_exact_mut(2))
        {
            dst[0] = linearization.linearization[src[0] as usize];
            dst[1] = src[1] as f32 * v_recip;
        }

        let new_immutable_store = ImageStore::<f32, CN> {
            buffer: std::borrow::Cow::Owned(target_vertical),
            channels: CN,
            width: store.width,
            height: store.height,
            stride: store.width * CN,
            bit_depth: 16,
        };

        let mut new_store = ImageStoreMut::<f32, CN>::alloc(into.width, into.height);

        self.scaler.resize_gray_alpha_f32(
            &new_immutable_store,
            &mut new_store,
            premultiply_alpha,
        )?;

        // Encode luma back to gamma; rescale alpha to the destination bit depth.
        for (src, dst) in new_store
            .as_bytes()
            .chunks_exact(2)
            .zip(into.buffer.borrow_mut().chunks_exact_mut(2))
        {
            let v0 = ((src[0] * 262143.).round().max(0.) as u32).min(262143);

            dst[0] = linearization.gamma[v0 as usize];
            dst[1] = (src[1] * max_bit_depth_value)
                .round()
                .min(max_bit_depth_value)
                .max(0.) as u16;
        }

        Ok(())
    }

    fn resize_rgb_u16<'a>(
        &'a self,
        store: &ImageStore<'a, u16, 3>,
        into: &mut ImageStoreMut<'a, u16, 3>,
    ) -> Result<(), PicScaleError> {
        resize_typical16(
            self.scaler.function,
            self.transfer_function,
            self.scaler.threading_policy,
            store,
            into,
        )
    }

    fn resize_rgba_u16<'a>(
        &'a self,
        store: &ImageStore<'a, u16, 4>,
        into: &mut ImageStoreMut<'a, u16, 4>,
        premultiply_alpha: bool,
    ) -> Result<(), PicScaleError> {
        let new_size = into.get_size();
        into.validate()?;
        store.validate()?;
        if store.width == 0 || store.height == 0 || new_size.width == 0 || new_size.height == 0 {
            return Err(PicScaleError::ZeroImageDimensions);
        }

        if check_image_size_overflow(store.width, store.height, store.channels) {
            return Err(PicScaleError::SourceImageIsTooLarge);
        }

        if check_image_size_overflow(new_size.width, new_size.height, store.channels) {
            return Err(PicScaleError::DestinationImageIsTooLarge);
        }

        if store.width == new_size.width && store.height == new_size.height {
            store.copied_to_mut(into);
            return Ok(());
        }

        const CN: usize = 4;

        let mut target_vertical = vec![f32::default(); store.width * store.height * CN];

        let mut linear_store =
            ImageStoreMut::<f32, CN>::from_slice(&mut target_vertical, store.width, store.height)?;

        let linearization = make_linearization16(self.transfer_function, into.bit_depth)?;

        let max_bit_depth_value = ((1u32 << into.bit_depth) - 1) as f32;

        let v_recip = 1. / max_bit_depth_value;

        // Linearize RGB; alpha is only normalized to 0..=1.
        for (src, dst) in store
            .as_bytes()
            .chunks_exact(4)
            .zip(linear_store.buffer.borrow_mut().chunks_exact_mut(4))
        {
            dst[0] = linearization.linearization[src[0] as usize];
            dst[1] = linearization.linearization[src[1] as usize];
            dst[2] = linearization.linearization[src[2] as usize];
            dst[3] = src[3] as f32 * v_recip;
        }

        let new_immutable_store = ImageStore::<f32, CN> {
            buffer: std::borrow::Cow::Owned(target_vertical),
            channels: CN,
            width: store.width,
            height: store.height,
            stride: store.width * CN,
            bit_depth: 16,
        };

        let mut new_store = ImageStoreMut::<f32, CN>::alloc(into.width, into.height);

        self.scaler
            .resize_rgba_f32(&new_immutable_store, &mut new_store, premultiply_alpha)?;

        // Encode RGB back to gamma; rescale alpha to the destination bit depth.
        for (src, dst) in new_store
            .as_bytes()
            .chunks_exact(4)
            .zip(into.buffer.borrow_mut().chunks_exact_mut(4))
        {
            let v0 = ((src[0] * 262143.).round().max(0.) as u32).min(262143);
            let v1 = ((src[1] * 262143.).round().max(0.) as u32).min(262143);
            let v2 = ((src[2] * 262143.).round().max(0.) as u32).min(262143);

            dst[0] = linearization.gamma[v0 as usize];
            dst[1] = linearization.gamma[v1 as usize];
            dst[2] = linearization.gamma[v2 as usize];
            dst[3] = (src[3] * max_bit_depth_value)
                .round()
                .min(max_bit_depth_value)
                .max(0.) as u16;
        }

        Ok(())
    }
}