use crate::pic_scale_error::PicScaleError;
use crate::scaler::Scaling;
use crate::support::check_image_size_overflow;
use crate::{
    ImageStore, ImageStoreMut, ImageStoreScaling, ResamplingFunction, Scaler, ScalingOptions,
    ScalingU16, ThreadingPolicy,
};
use colorutils_rs::TransferFunction;

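/// Scaler that approximates resampling in linear light with small look-up tables.
///
/// Samples are decoded through the configured [`TransferFunction`] into a
/// fixed-point linear representation, resized there, and encoded back to the
/// original gamma, which avoids the blending artifacts of resizing directly on
/// gamma-encoded values without paying for a full floating-point linear pass.
///
/// A minimal usage sketch; the crate-root re-exports, the `Lanczos3` variant and
/// the `ImageStore::from_slice`/`ImageStoreMut::alloc` constructors are assumed
/// from the rest of this crate, and `rgb_bytes` is caller-provided packed RGB data:
///
/// ```ignore
/// use pic_scale::{ImageStore, ImageStoreMut, LinearApproxScaler, ResamplingFunction, Scaling};
///
/// let scaler = LinearApproxScaler::new(ResamplingFunction::Lanczos3);
/// let src = ImageStore::<u8, 3>::from_slice(&rgb_bytes, 1920, 1080).unwrap();
/// let mut dst = ImageStoreMut::<u8, 3>::alloc(960, 540);
/// scaler.resize_rgb(&src, &mut dst).unwrap();
/// ```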
#[derive(Debug, Copy, Clone)]
pub struct LinearApproxScaler {
    pub(crate) scaler: Scaler,
    pub(crate) transfer_function: TransferFunction,
}

impl LinearApproxScaler {
    pub fn new(filter: ResamplingFunction) -> Self {
        LinearApproxScaler {
            scaler: Scaler::new(filter),
            transfer_function: TransferFunction::Srgb,
        }
    }

    pub fn new_with_transfer(
        filter: ResamplingFunction,
        transfer_function: TransferFunction,
    ) -> Self {
        LinearApproxScaler {
            scaler: Scaler::new(filter),
            transfer_function,
        }
    }
}

struct Linearization {
    linearization: Box<[u16; 256]>,
    gamma: Box<[u8; 65536]>,
}

struct Linearization16 {
    linearization: Box<[u16; 65536]>,
    gamma: Box<[u16; 65536]>,
}

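// Look-up tables for the 8-bit path: `linearization` decodes an 8-bit encoded
// value to a 12-bit linear value (0..=4095), and `gamma` encodes a 12-bit linear
// value back to 8 bits. `gamma` is sized for direct indexing by the resized
// `u16` samples; only its first 4096 entries are populated, since the
// intermediate store works at a 12-bit depth.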
fn make_linearization(transfer_function: TransferFunction) -> Linearization {
    let mut linearizing = Box::new([0u16; 256]);
    let max_lin_depth = (1u32 << 12) - 1;
    let mut gamma = Box::new([0u8; 65536]);

    for (i, dst) in linearizing.iter_mut().enumerate() {
        *dst = (transfer_function.linearize(i as f32 / 255.) * max_lin_depth as f32)
            .round()
            .min(max_lin_depth as f32) as u16;
    }

    let max_keep = 1u32 << 12;

    for (i, dst) in gamma.iter_mut().take(max_keep as usize).enumerate() {
        *dst = (transfer_function.gamma(i as f32 / max_lin_depth as f32) * 255.)
            .round()
            .min(255.) as u8;
    }

    Linearization {
        linearization: linearizing,
        gamma,
    }
}

// Look-up tables for the 16-bit path: the forward table decodes a
// `bit_depth`-encoded value to full-range 16-bit linear, and the inverse table
// encodes 16-bit linear back to the destination bit depth. Bit depths below 8
// are rejected.
fn make_linearization16(
    transfer_function: TransferFunction,
    bit_depth: usize,
) -> Result<Linearization16, PicScaleError> {
    if bit_depth < 8 {
        return Err(PicScaleError::UnsupportedBitDepth(bit_depth));
    }
    let mut linearizing = Box::new([0u16; 65536]);
    let max_lin_depth = (1u32 << bit_depth) - 1;
    let keep_max = 1u32 << bit_depth;
    let mut gamma = Box::new([0u16; 65536]);

    for (i, dst) in linearizing.iter_mut().take(keep_max as usize).enumerate() {
        *dst = (transfer_function.linearize(i as f32 / max_lin_depth as f32) * 65535.)
            .round()
            .min(65535.) as u16;
    }

    for (i, dst) in gamma.iter_mut().enumerate() {
        *dst = (transfer_function.gamma(i as f32 / 65535.) * max_lin_depth as f32)
            .round()
            .min(max_lin_depth as f32) as u16;
    }

    Ok(Linearization16 {
        linearization: linearizing,
        gamma,
    })
}

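// Shared 8-bit pipeline for channel layouts without an alpha plane: validate,
// decode every sample through the 8-bit -> 12-bit linearization LUT into a
// temporary `u16` store, resample that store in linear space, then encode the
// result back to 8 bits through the gamma LUT. Same-size requests are a plain copy.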
fn resize_typical8<'a, const CN: usize>(
    resampling_function: ResamplingFunction,
    transfer_function: TransferFunction,
    threading_policy: ThreadingPolicy,
    store: &ImageStore<'a, u8, CN>,
    into: &mut ImageStoreMut<'a, u8, CN>,
) -> Result<(), PicScaleError>
where
    ImageStore<'a, u16, CN>: ImageStoreScaling<'a, u16, CN>,
{
    let new_size = into.get_size();
    into.validate()?;
    store.validate()?;
    if store.width == 0 || store.height == 0 || new_size.width == 0 || new_size.height == 0 {
        return Err(PicScaleError::ZeroImageDimensions);
    }

    if check_image_size_overflow(store.width, store.height, store.channels) {
        return Err(PicScaleError::SourceImageIsTooLarge);
    }

    if check_image_size_overflow(new_size.width, new_size.height, store.channels) {
        return Err(PicScaleError::DestinationImageIsTooLarge);
    }

    if store.width == new_size.width && store.height == new_size.height {
        store.copied_to_mut(into);
        return Ok(());
    }

    let mut target_vertical = vec![u16::default(); store.width * store.height * CN];

    let mut linear_store =
        ImageStoreMut::<u16, CN>::from_slice(&mut target_vertical, store.width, store.height)?;
    linear_store.bit_depth = 12;

    let linearization = make_linearization(transfer_function);

    for (&src, dst) in store
        .as_bytes()
        .iter()
        .zip(linear_store.buffer.borrow_mut())
    {
        *dst = linearization.linearization[src as usize];
    }

    let new_immutable_store = ImageStore::<u16, CN> {
        buffer: std::borrow::Cow::Owned(target_vertical),
        channels: CN,
        width: store.width,
        height: store.height,
        stride: store.width * CN,
        bit_depth: 12,
    };

    let mut new_store = ImageStoreMut::<u16, CN>::alloc_with_depth(into.width, into.height, 12);

    new_immutable_store.scale(
        &mut new_store,
        ScalingOptions {
            resampling_function,
            threading_policy,
            ..Default::default()
        },
    )?;

    for (&src, dst) in new_store.as_bytes().iter().zip(into.buffer.borrow_mut()) {
        *dst = linearization.gamma[src as usize];
    }

    Ok(())
}

impl Scaling for LinearApproxScaler {
    fn set_threading_policy(&mut self, threading_policy: ThreadingPolicy) {
        self.scaler.threading_policy = threading_policy;
    }

    fn resize_plane<'a>(
        &'a self,
        store: &ImageStore<'a, u8, 1>,
        into: &mut ImageStoreMut<'a, u8, 1>,
    ) -> Result<(), PicScaleError> {
        resize_typical8(
            self.scaler.function,
            self.transfer_function,
            self.scaler.threading_policy,
            store,
            into,
        )
    }

    fn resize_cbcr8<'a>(
        &'a self,
        store: &ImageStore<'a, u8, 2>,
        into: &mut ImageStoreMut<'a, u8, 2>,
    ) -> Result<(), PicScaleError> {
        resize_typical8(
            self.scaler.function,
            self.transfer_function,
            self.scaler.threading_policy,
            store,
            into,
        )
    }

    fn resize_gray_alpha<'a>(
        &'a self,
        store: &ImageStore<'a, u8, 2>,
        into: &mut ImageStoreMut<'a, u8, 2>,
        premultiply_alpha: bool,
    ) -> Result<(), PicScaleError> {
        let new_size = into.get_size();
        into.validate()?;
        store.validate()?;
        if store.width == 0 || store.height == 0 || new_size.width == 0 || new_size.height == 0 {
            return Err(PicScaleError::ZeroImageDimensions);
        }

        if check_image_size_overflow(store.width, store.height, store.channels) {
            return Err(PicScaleError::SourceImageIsTooLarge);
        }

        if check_image_size_overflow(new_size.width, new_size.height, store.channels) {
            return Err(PicScaleError::DestinationImageIsTooLarge);
        }

        if store.width == new_size.width && store.height == new_size.height {
            store.copied_to_mut(into);
            return Ok(());
        }

        const CN: usize = 2;

        let mut target_vertical = vec![u16::default(); store.width * store.height * CN];

        let mut linear_store =
            ImageStoreMut::<u16, CN>::from_slice(&mut target_vertical, store.width, store.height)?;
        linear_store.bit_depth = 12;

        let linearization = make_linearization(self.transfer_function);

        // Gray goes through the linearization LUT; alpha stays gamma-agnostic and is
        // widened to 12 bits by bit replication ((a << 4) | (a >> 4)), so 255 maps to 4095.
        for (src, dst) in store
            .as_bytes()
            .chunks_exact(2)
            .zip(linear_store.buffer.borrow_mut().chunks_exact_mut(2))
        {
            dst[0] = linearization.linearization[src[0] as usize];
            dst[1] = ((src[1] as u16) << 4) | ((src[1] as u16) >> 4);
        }

        let new_immutable_store = ImageStore::<u16, CN> {
            buffer: std::borrow::Cow::Owned(target_vertical),
            channels: CN,
            width: store.width,
            height: store.height,
            stride: store.width * CN,
            bit_depth: 12,
        };

        let mut new_store = ImageStoreMut::<u16, CN>::alloc_with_depth(into.width, into.height, 12);

        self.scaler
            .resize_gray_alpha16(&new_immutable_store, &mut new_store, premultiply_alpha)?;

        // Encode gray back to 8 bits through the gamma LUT; alpha returns to 8 bits by
        // dropping the replicated low nibble.
        for (src, dst) in new_store
            .as_bytes()
            .chunks_exact(2)
            .zip(into.buffer.borrow_mut().chunks_exact_mut(2))
        {
            dst[0] = linearization.gamma[src[0] as usize];
            dst[1] = (src[1] >> 4).min(255) as u8;
        }

        Ok(())
    }

    fn resize_rgb<'a>(
        &self,
        store: &ImageStore<'a, u8, 3>,
        into: &mut ImageStoreMut<'a, u8, 3>,
    ) -> Result<(), PicScaleError> {
        resize_typical8(
            self.scaler.function,
            self.transfer_function,
            self.scaler.threading_policy,
            store,
            into,
        )
    }

    fn resize_rgba<'a>(
        &self,
        store: &ImageStore<'a, u8, 4>,
        into: &mut ImageStoreMut<'a, u8, 4>,
        premultiply_alpha: bool,
    ) -> Result<(), PicScaleError> {
        let new_size = into.get_size();
        into.validate()?;
        store.validate()?;
        if store.width == 0 || store.height == 0 || new_size.width == 0 || new_size.height == 0 {
            return Err(PicScaleError::ZeroImageDimensions);
        }

        if check_image_size_overflow(store.width, store.height, store.channels) {
            return Err(PicScaleError::SourceImageIsTooLarge);
        }

        if check_image_size_overflow(new_size.width, new_size.height, store.channels) {
            return Err(PicScaleError::DestinationImageIsTooLarge);
        }

        if store.width == new_size.width && store.height == new_size.height {
            store.copied_to_mut(into);
            return Ok(());
        }

        const CN: usize = 4;

        let mut target_vertical = vec![u16::default(); store.width * store.height * CN];

        let mut linear_store =
            ImageStoreMut::<u16, CN>::from_slice(&mut target_vertical, store.width, store.height)?;
        linear_store.bit_depth = 12;

        let linearization = make_linearization(self.transfer_function);

        // RGB goes through the linearization LUT; alpha stays gamma-agnostic and is
        // widened to 12 bits by bit replication, so 255 maps to 4095.
        for (src, dst) in store
            .as_bytes()
            .chunks_exact(4)
            .zip(linear_store.buffer.borrow_mut().chunks_exact_mut(4))
        {
            dst[0] = linearization.linearization[src[0] as usize];
            dst[1] = linearization.linearization[src[1] as usize];
            dst[2] = linearization.linearization[src[2] as usize];
            dst[3] = ((src[3] as u16) << 4) | ((src[3] as u16) >> 4);
        }

        let new_immutable_store = ImageStore::<u16, CN> {
            buffer: std::borrow::Cow::Owned(target_vertical),
            channels: CN,
            width: store.width,
            height: store.height,
            stride: store.width * CN,
            bit_depth: 12,
        };

        let mut new_store = ImageStoreMut::<u16, CN>::alloc_with_depth(into.width, into.height, 12);

        self.scaler
            .resize_rgba_u16(&new_immutable_store, &mut new_store, premultiply_alpha)?;

        // Encode RGB back to 8 bits through the gamma LUT; alpha returns to 8 bits by
        // dropping the replicated low nibble.
        for (src, dst) in new_store
            .as_bytes()
            .chunks_exact(4)
            .zip(into.buffer.borrow_mut().chunks_exact_mut(4))
        {
            dst[0] = linearization.gamma[src[0] as usize];
            dst[1] = linearization.gamma[src[1] as usize];
            dst[2] = linearization.gamma[src[2] as usize];
            dst[3] = (src[3] >> 4).min(255) as u8;
        }

        Ok(())
    }
}

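// Shared 16-bit pipeline for channel layouts without an alpha plane: the same
// linearize -> resample -> re-encode scheme as `resize_typical8`, but the
// intermediate store works at full 16-bit precision and the LUTs are built for
// the destination bit depth (`into.bit_depth`).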
fn resize_typical16<'a, const CN: usize>(
    resampling_function: ResamplingFunction,
    transfer_function: TransferFunction,
    threading_policy: ThreadingPolicy,
    store: &ImageStore<'a, u16, CN>,
    into: &mut ImageStoreMut<'a, u16, CN>,
) -> Result<(), PicScaleError>
where
    ImageStore<'a, u16, CN>: ImageStoreScaling<'a, u16, CN>,
{
    let new_size = into.get_size();
    into.validate()?;
    store.validate()?;
    if store.width == 0 || store.height == 0 || new_size.width == 0 || new_size.height == 0 {
        return Err(PicScaleError::ZeroImageDimensions);
    }

    if check_image_size_overflow(store.width, store.height, store.channels) {
        return Err(PicScaleError::SourceImageIsTooLarge);
    }

    if check_image_size_overflow(new_size.width, new_size.height, store.channels) {
        return Err(PicScaleError::DestinationImageIsTooLarge);
    }

    if store.width == new_size.width && store.height == new_size.height {
        store.copied_to_mut(into);
        return Ok(());
    }

    let mut target_vertical = vec![u16::default(); store.width * store.height * CN];

    let mut linear_store =
        ImageStoreMut::<u16, CN>::from_slice(&mut target_vertical, store.width, store.height)?;
    linear_store.bit_depth = 16;

    let linearization = make_linearization16(transfer_function, into.bit_depth)?;

    for (&src, dst) in store
        .as_bytes()
        .iter()
        .zip(linear_store.buffer.borrow_mut())
    {
        *dst = linearization.linearization[src as usize];
    }

    let new_immutable_store = ImageStore::<u16, CN> {
        buffer: std::borrow::Cow::Owned(target_vertical),
        channels: CN,
        width: store.width,
        height: store.height,
        stride: store.width * CN,
        bit_depth: 16,
    };

    let mut new_store = ImageStoreMut::<u16, CN>::alloc_with_depth(into.width, into.height, 16);

    new_immutable_store.scale(
        &mut new_store,
        ScalingOptions {
            resampling_function,
            threading_policy,
            ..Default::default()
        },
    )?;

    for (&src, dst) in new_store.as_bytes().iter().zip(into.buffer.borrow_mut()) {
        *dst = linearization.gamma[src as usize];
    }

    Ok(())
}

impl ScalingU16 for LinearApproxScaler {
    fn resize_plane_u16<'a>(
        &'a self,
        store: &ImageStore<'a, u16, 1>,
        into: &mut ImageStoreMut<'a, u16, 1>,
    ) -> Result<(), PicScaleError> {
        resize_typical16(
            self.scaler.function,
            self.transfer_function,
            self.scaler.threading_policy,
            store,
            into,
        )
    }

    fn resize_cbcr_u16<'a>(
        &'a self,
        store: &ImageStore<'a, u16, 2>,
        into: &mut ImageStoreMut<'a, u16, 2>,
    ) -> Result<(), PicScaleError> {
        resize_typical16(
            self.scaler.function,
            self.transfer_function,
            self.scaler.threading_policy,
            store,
            into,
        )
    }

    fn resize_gray_alpha16<'a>(
        &'a self,
        store: &ImageStore<'a, u16, 2>,
        into: &mut ImageStoreMut<'a, u16, 2>,
        premultiply_alpha: bool,
    ) -> Result<(), PicScaleError> {
        let new_size = into.get_size();
        into.validate()?;
        store.validate()?;
        if store.width == 0 || store.height == 0 || new_size.width == 0 || new_size.height == 0 {
            return Err(PicScaleError::ZeroImageDimensions);
        }

        if check_image_size_overflow(store.width, store.height, store.channels) {
            return Err(PicScaleError::SourceImageIsTooLarge);
        }

        if check_image_size_overflow(new_size.width, new_size.height, store.channels) {
            return Err(PicScaleError::DestinationImageIsTooLarge);
        }

        if store.width == new_size.width && store.height == new_size.height {
            store.copied_to_mut(into);
            return Ok(());
        }

        const CN: usize = 2;

        let mut target_vertical = vec![u16::default(); store.width * store.height * CN];

        let mut linear_store =
            ImageStoreMut::<u16, CN>::from_slice(&mut target_vertical, store.width, store.height)?;
        linear_store.bit_depth = 16;

        let linearization = make_linearization16(self.transfer_function, into.bit_depth)?;

        let max_bit_depth_value = ((1u32 << into.bit_depth) - 1) as f32;

        // Alpha is rescaled linearly from the source bit depth to the full 16-bit range.
        let a_f_scale = 65535. / max_bit_depth_value;

        for (src, dst) in store
            .as_bytes()
            .chunks_exact(2)
            .zip(linear_store.buffer.borrow_mut().chunks_exact_mut(2))
        {
            dst[0] = linearization.linearization[src[0] as usize];
            dst[1] = (src[1] as f32 * a_f_scale).round().min(65535.) as u16;
        }

        let new_immutable_store = ImageStore::<u16, CN> {
            buffer: std::borrow::Cow::Owned(target_vertical),
            channels: CN,
            width: store.width,
            height: store.height,
            stride: store.width * CN,
            bit_depth: 16,
        };

        let mut new_store = ImageStoreMut::<u16, CN>::alloc_with_depth(into.width, into.height, 16);

        self.scaler
            .resize_gray_alpha16(&new_immutable_store, &mut new_store, premultiply_alpha)?;

        // ... and back from the 16-bit range to the destination bit depth.
        let a_r_scale = max_bit_depth_value / 65535.;

        for (src, dst) in new_store
            .as_bytes()
            .chunks_exact(2)
            .zip(into.buffer.borrow_mut().chunks_exact_mut(2))
        {
            dst[0] = linearization.gamma[src[0] as usize];
            dst[1] = (src[1] as f32 * a_r_scale).round().min(max_bit_depth_value) as u16;
        }

        Ok(())
    }

    fn resize_rgb_u16<'a>(
        &'a self,
        store: &ImageStore<'a, u16, 3>,
        into: &mut ImageStoreMut<'a, u16, 3>,
    ) -> Result<(), PicScaleError> {
        resize_typical16(
            self.scaler.function,
            self.transfer_function,
            self.scaler.threading_policy,
            store,
            into,
        )
    }

    fn resize_rgba_u16<'a>(
        &'a self,
        store: &ImageStore<'a, u16, 4>,
        into: &mut ImageStoreMut<'a, u16, 4>,
        premultiply_alpha: bool,
    ) -> Result<(), PicScaleError> {
        let new_size = into.get_size();
        into.validate()?;
        store.validate()?;
        if store.width == 0 || store.height == 0 || new_size.width == 0 || new_size.height == 0 {
            return Err(PicScaleError::ZeroImageDimensions);
        }

        if check_image_size_overflow(store.width, store.height, store.channels) {
            return Err(PicScaleError::SourceImageIsTooLarge);
        }

        if check_image_size_overflow(new_size.width, new_size.height, store.channels) {
            return Err(PicScaleError::DestinationImageIsTooLarge);
        }

        if store.width == new_size.width && store.height == new_size.height {
            store.copied_to_mut(into);
            return Ok(());
        }

        const CN: usize = 4;

        let mut target_vertical = vec![u16::default(); store.width * store.height * CN];

        let mut linear_store =
            ImageStoreMut::<u16, CN>::from_slice(&mut target_vertical, store.width, store.height)?;
        linear_store.bit_depth = 16;

        let linearization = make_linearization16(self.transfer_function, into.bit_depth)?;

        let max_bit_depth_value = ((1u32 << into.bit_depth) - 1) as f32;

        // Alpha is rescaled linearly from the source bit depth to the full 16-bit range.
        let a_f_scale = 65535. / max_bit_depth_value;

        for (src, dst) in store
            .as_bytes()
            .chunks_exact(4)
            .zip(linear_store.buffer.borrow_mut().chunks_exact_mut(4))
        {
            dst[0] = linearization.linearization[src[0] as usize];
            dst[1] = linearization.linearization[src[1] as usize];
            dst[2] = linearization.linearization[src[2] as usize];
            dst[3] = (src[3] as f32 * a_f_scale).round().min(65535.) as u16;
        }

        let new_immutable_store = ImageStore::<u16, CN> {
            buffer: std::borrow::Cow::Owned(target_vertical),
            channels: CN,
            width: store.width,
            height: store.height,
            stride: store.width * CN,
            bit_depth: 16,
        };

        let mut new_store = ImageStoreMut::<u16, CN>::alloc_with_depth(into.width, into.height, 16);

        self.scaler
            .resize_rgba_u16(&new_immutable_store, &mut new_store, premultiply_alpha)?;

        // ... and back from the 16-bit range to the destination bit depth.
        let a_r_scale = max_bit_depth_value / 65535.;

        for (src, dst) in new_store
            .as_bytes()
            .chunks_exact(4)
            .zip(into.buffer.borrow_mut().chunks_exact_mut(4))
        {
            dst[0] = linearization.gamma[src[0] as usize];
            dst[1] = linearization.gamma[src[1] as usize];
            dst[2] = linearization.gamma[src[2] as usize];
            dst[3] = (src[3] as f32 * a_r_scale).round().min(max_bit_depth_value) as u16;
        }

        Ok(())
    }
}