use crate::ar30::{Ar30ByteOrder, Rgb30};
use crate::convolution::{ConvolutionOptions, HorizontalConvolutionPass, VerticalConvolutionPass};
use crate::filter_weights::{FilterBounds, FilterWeights};
use crate::image_size::ImageSize;
use crate::image_store::{
    AssociateAlpha, CheckStoreDensity, ImageStore, ImageStoreMut, UnassociateAlpha,
};
use crate::nearest_sampler::resize_nearest;
use crate::pic_scale_error::PicScaleError;
use crate::resize_ar30::resize_ar30_impl;
use crate::support::check_image_size_overflow;
use crate::threading_policy::ThreadingPolicy;
use crate::{
    CbCr16ImageStore, CbCr8ImageStore, CbCrF32ImageStore, ConstPI, ConstSqrt2, Jinc,
    Planar16ImageStore, Planar8ImageStore, PlanarF32ImageStore, ResamplingFunction,
    Rgb16ImageStore, Rgb8ImageStore, RgbF32ImageStore, Rgba16ImageStore, Rgba8ImageStore,
    RgbaF32ImageStore,
};
use num_traits::{AsPrimitive, Float, Signed};
use rayon::ThreadPool;
use std::fmt::Debug;
use std::ops::{AddAssign, MulAssign, Neg};

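/// `Scaler` is the main entry point: it bundles the resampling function,
/// threading policy and workload strategy used by every resize call.
///
/// A minimal usage sketch, based on the calls exercised by the tests at the
/// bottom of this file (buffer contents and dimensions are illustrative):
///
/// ```ignore
/// let mut scaler = Scaler::new(ResamplingFunction::Bilinear);
/// scaler.set_threading_policy(ThreadingPolicy::Single);
/// let src = vec![0u8; 256 * 256 * 4];
/// let src_store = ImageStore::<u8, 4>::from_slice(&src, 256, 256)?;
/// let mut dst_store = ImageStoreMut::<u8, 4>::alloc(128, 128);
/// scaler.resize_rgba(&src_store, &mut dst_store, false)?;
/// ```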
#[derive(Debug, Copy, Clone)]
pub struct Scaler {
    pub(crate) function: ResamplingFunction,
    pub(crate) threading_policy: ThreadingPolicy,
    pub workload_strategy: WorkloadStrategy,
}

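/// 8-bit resizing entry points: two-channel CbCr, RGB and RGBA stores.
///
/// For RGBA, `premultiply_alpha` asks the scaler to premultiply alpha before
/// convolution and unpremultiply the result afterwards.
///
/// A minimal sketch (`rgb_pixels` is any RGB byte buffer, error handling
/// elided, dimensions illustrative):
///
/// ```ignore
/// let scaler = Scaler::new(ResamplingFunction::Lanczos3);
/// let src_store = ImageStore::<u8, 3>::from_slice(&rgb_pixels, 640, 480)?;
/// let mut dst_store = ImageStoreMut::<u8, 3>::alloc(320, 240);
/// scaler.resize_rgb(&src_store, &mut dst_store)?;
/// ```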
pub trait Scaling {
    fn set_threading_policy(&mut self, threading_policy: ThreadingPolicy);

    fn resize_cbcr8<'a>(
        &'a self,
        store: &ImageStore<'a, u8, 2>,
        into: &mut ImageStoreMut<'a, u8, 2>,
    ) -> Result<(), PicScaleError>;

    fn resize_rgb<'a>(
        &'a self,
        store: &ImageStore<'a, u8, 3>,
        into: &mut ImageStoreMut<'a, u8, 3>,
    ) -> Result<(), PicScaleError>;

    fn resize_rgba<'a>(
        &'a self,
        store: &ImageStore<'a, u8, 4>,
        into: &mut ImageStoreMut<'a, u8, 4>,
        premultiply_alpha: bool,
    ) -> Result<(), PicScaleError>;
}

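/// `f32` resizing entry points: two-channel CbCr, RGB and RGBA stores, with
/// the same calling convention as [`Scaling`].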
pub trait ScalingF32 {
    fn resize_cbcr_f32<'a>(
        &'a self,
        store: &ImageStore<'a, f32, 2>,
        into: &mut ImageStoreMut<'a, f32, 2>,
    ) -> Result<(), PicScaleError>;

    fn resize_rgb_f32<'a>(
        &'a self,
        store: &ImageStore<'a, f32, 3>,
        into: &mut ImageStoreMut<'a, f32, 3>,
    ) -> Result<(), PicScaleError>;

    fn resize_rgba_f32<'a>(
        &'a self,
        store: &ImageStore<'a, f32, 4>,
        into: &mut ImageStoreMut<'a, f32, 4>,
        premultiply_alpha: bool,
    ) -> Result<(), PicScaleError>;
}

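/// Hints whether the convolution passes should favour output quality or raw
/// throughput; the default is `PreferSpeed`.
///
/// A short sketch:
///
/// ```ignore
/// let mut scaler = Scaler::new(ResamplingFunction::Bilinear);
/// scaler.set_workload_strategy(WorkloadStrategy::PreferQuality);
/// ```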
#[derive(Debug, Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Default)]
pub enum WorkloadStrategy {
    /// Prefer higher output quality at some performance cost.
    PreferQuality,
    /// Prefer throughput over maximum quality.
    #[default]
    PreferSpeed,
}

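/// High bit-depth (`u16` storage, 1..=16 bits per channel) resizing entry
/// points: planar, two-channel CbCr, RGB and RGBA stores. The destination
/// store carries the bit depth; see the `check_rgba10_resizing_vertical` test
/// below for an `alloc_with_depth` example.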
pub trait ScalingU16 {
    fn resize_plane_u16<'a>(
        &'a self,
        store: &ImageStore<'a, u16, 1>,
        into: &mut ImageStoreMut<'a, u16, 1>,
    ) -> Result<(), PicScaleError>;

    fn resize_cbcr_u16<'a>(
        &'a self,
        store: &ImageStore<'a, u16, 2>,
        into: &mut ImageStoreMut<'a, u16, 2>,
    ) -> Result<(), PicScaleError>;

    fn resize_rgb_u16<'a>(
        &'a self,
        store: &ImageStore<'a, u16, 3>,
        into: &mut ImageStoreMut<'a, u16, 3>,
    ) -> Result<(), PicScaleError>;

    fn resize_rgba_u16<'a>(
        &'a self,
        store: &ImageStore<'a, u16, 4>,
        into: &mut ImageStoreMut<'a, u16, 4>,
        premultiply_alpha: bool,
    ) -> Result<(), PicScaleError>;
}

impl Scaler {
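    /// Creates a new scaler for the given resampling function.
    ///
    /// The scaler starts single-threaded with the default workload strategy;
    /// use `set_threading_policy` and `set_workload_strategy` to change that.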
    pub fn new(filter: ResamplingFunction) -> Self {
        Scaler {
            function: filter,
            threading_policy: ThreadingPolicy::Single,
            workload_strategy: WorkloadStrategy::default(),
        }
    }

    /// Overrides the workload strategy used by subsequent resize calls.
    pub fn set_workload_strategy(&mut self, workload_strategy: WorkloadStrategy) {
        self.workload_strategy = workload_strategy;
    }

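    /// Computes the per-output-pixel filter taps and bounds for one dimension.
    ///
    /// For resizable kernels the filter support is stretched by the scale
    /// factor when downscaling, and every row of taps is normalised to sum to
    /// one. The `is_area` branch instead builds the simple two-tap weights
    /// used for area resampling.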
    pub(crate) fn generate_weights<T>(&self, in_size: usize, out_size: usize) -> FilterWeights<T>
    where
        T: Copy
            + Neg
            + Signed
            + Float
            + 'static
            + ConstPI
            + MulAssign<T>
            + AddAssign<T>
            + AsPrimitive<f64>
            + AsPrimitive<usize>
            + Jinc<T>
            + ConstSqrt2
            + Default
            + AsPrimitive<i32>,
        f32: AsPrimitive<T>,
        f64: AsPrimitive<T>,
        i64: AsPrimitive<T>,
        i32: AsPrimitive<T>,
        usize: AsPrimitive<T>,
    {
        let resampling_filter = self.function.get_resampling_filter();
        let scale = in_size.as_() / out_size.as_();
        let is_resizable_kernel = resampling_filter.is_resizable_kernel;
        let filter_scale_cutoff = match is_resizable_kernel {
            true => scale.max(1f32.as_()),
            false => 1f32.as_(),
        };
        let filter_base_size = resampling_filter.min_kernel_size;
        let resampling_function = resampling_filter.kernel;

        let is_area = resampling_filter.is_area && scale < 1.as_();

        let mut bounds: Vec<FilterBounds> = vec![FilterBounds::new(0, 0); out_size];

        if !is_area {
            let window_func = resampling_filter.window;
            let base_size: usize = (filter_base_size.as_() * filter_scale_cutoff).round().as_();
            let kernel_size = base_size;
            let filter_radius = base_size.as_() / 2.as_();
            let filter_scale = 1f32.as_() / filter_scale_cutoff;
            let mut weights: Vec<T> = vec![T::default(); kernel_size * out_size];
            let mut local_filters = vec![T::default(); kernel_size];
            let mut filter_position = 0usize;
            let blur_scale = match window_func {
                None => 1f32.as_(),
                Some(window) => {
                    if window.blur.as_() > 0f32.as_() {
                        1f32.as_() / window.blur.as_()
                    } else {
                        0f32.as_()
                    }
                }
            };
            for (i, bound) in bounds.iter_mut().enumerate() {
                let center_x = ((i.as_() + 0.5.as_()) * scale).min(in_size.as_());
                let mut weights_sum: T = 0f32.as_();
                let mut local_filter_iteration = 0usize;

                let start: usize = (center_x - filter_radius).floor().max(0f32.as_()).as_();
                let end: usize = (center_x + filter_radius)
                    .ceil()
                    .min(start.as_() + kernel_size.as_())
                    .min(in_size.as_())
                    .as_();

                let center = center_x - 0.5.as_();

                for k in start..end {
                    let dx = k.as_() - center;
                    let weight;
                    if let Some(resampling_window) = window_func {
                        let mut x = dx.abs();
                        x = if resampling_window.blur.as_() > 0f32.as_() {
                            x * blur_scale
                        } else {
                            x
                        };
                        x = if x <= resampling_window.taper.as_() {
                            0f32.as_()
                        } else {
                            (x - resampling_window.taper.as_())
                                / (1f32.as_() - resampling_window.taper.as_())
                        };
                        let window_producer = resampling_window.window;
                        let x_kernel_scaled = x * filter_scale;
                        let window = if x < resampling_window.window_size.as_() {
                            window_producer(x_kernel_scaled * resampling_window.window_size.as_())
                        } else {
                            0f32.as_()
                        };
                        weight = window * resampling_function(x_kernel_scaled);
                    } else {
                        let dx = dx.abs();
                        weight = resampling_function(dx * filter_scale);
                    }
                    weights_sum += weight;
                    unsafe {
                        *local_filters.get_unchecked_mut(local_filter_iteration) = weight;
                    }
                    local_filter_iteration += 1;
                }

                // For EWA kernels, smooth neighbouring taps with a simple
                // exponentially weighted pass (alpha = 0.7) and recompute the sum.
                let alpha: T = 0.7f32.as_();
                if resampling_filter.is_ewa && !local_filters.is_empty() {
                    weights_sum = unsafe { *local_filters.get_unchecked(0) };
                    for j in 1..local_filter_iteration {
                        let new_weight = alpha * unsafe { *local_filters.get_unchecked(j) }
                            + (1f32.as_() - alpha) * unsafe { *local_filters.get_unchecked(j - 1) };
                        unsafe {
                            *local_filters.get_unchecked_mut(j) = new_weight;
                        }
                        weights_sum += new_weight;
                    }
                }

                let size = end - start;

                *bound = FilterBounds::new(start, size);

                // Normalise so the taps for this output pixel sum to one.
                if weights_sum != 0f32.as_() {
                    let recpeq = 1f32.as_() / weights_sum;

                    for (dst, src) in weights
                        .iter_mut()
                        .skip(filter_position)
                        .take(size)
                        .zip(local_filters.iter().take(size))
                    {
                        *dst = *src * recpeq;
                    }
                }

                filter_position += kernel_size;
            }

            FilterWeights::<T>::new(
                weights,
                kernel_size,
                kernel_size,
                out_size,
                filter_radius.as_(),
                bounds,
            )
        } else {
            // Area resampling: a simple two-tap kernel whose weights come from
            // the fractional pixel coverage.
            let inv_scale: T = 1.as_() / scale;
            let kernel_size = 2;
            let filter_radius: T = 1.as_();
            let mut weights: Vec<T> = vec![T::default(); kernel_size * out_size];
            let mut local_filters = vec![T::default(); kernel_size];
            let mut filter_position = 0usize;

            for (i, bound) in bounds.iter_mut().enumerate() {
                let mut weights_sum: T = 0f32.as_();

                let sx: T = (i.as_() * scale).floor();
                let fx = (i as i64 + 1).as_() - (sx + 1.as_()) * inv_scale;
                let dx = if fx <= 0.as_() {
                    0.as_()
                } else {
                    fx - fx.floor()
                };
                let dx = dx.abs();
                let weight0 = 1.as_() - dx;
                let weight1: T = dx;
                local_filters[0] = weight0;
                local_filters[1] = weight1;

                let start: usize = sx.floor().max(0f32.as_()).as_();
                let end: usize = (sx + kernel_size.as_())
                    .ceil()
                    .min(start.as_() + kernel_size.as_())
                    .min(in_size.as_())
                    .as_();

                let size = end - start;

                weights_sum += weight0;
                if size > 1 {
                    weights_sum += weight1;
                }
                *bound = FilterBounds::new(start, size);

                if weights_sum != 0f32.as_() {
                    let recpeq = 1f32.as_() / weights_sum;

                    for (dst, src) in weights
                        .iter_mut()
                        .skip(filter_position)
                        .take(size)
                        .zip(local_filters.iter().take(size))
                    {
                        *dst = *src * recpeq;
                    }
                } else {
                    weights[filter_position] = 1.as_();
                }

                filter_position += kernel_size;
            }

            FilterWeights::new(
                weights,
                kernel_size,
                kernel_size,
                out_size,
                filter_radius.as_(),
                bounds,
            )
        }
    }
}

impl Scaler {
    pub(crate) fn generic_resize<
        'a,
        T: Clone + Copy + Debug + Send + Sync + Default + 'static,
        const N: usize,
    >(
        &self,
        store: &ImageStore<'a, T, N>,
        into: &mut ImageStoreMut<'a, T, N>,
    ) -> Result<(), PicScaleError>
    where
        ImageStore<'a, T, N>: VerticalConvolutionPass<T, N> + HorizontalConvolutionPass<T, N>,
        ImageStoreMut<'a, T, N>: CheckStoreDensity,
    {
        let new_size = into.get_size();
        into.validate()?;
        store.validate()?;
        if store.width == 0 || store.height == 0 || new_size.width == 0 || new_size.height == 0 {
            return Err(PicScaleError::ZeroImageDimensions);
        }

        if check_image_size_overflow(store.width, store.height, store.channels) {
            return Err(PicScaleError::SourceImageIsTooLarge);
        }

        if check_image_size_overflow(new_size.width, new_size.height, store.channels) {
            return Err(PicScaleError::DestinationImageIsTooLarge);
        }

        if into.should_have_bit_depth() && !(1..=16).contains(&into.bit_depth) {
            return Err(PicScaleError::UnsupportedBitDepth(into.bit_depth));
        }

        if store.width == new_size.width && store.height == new_size.height {
            store.copied_to_mut(into);
            return Ok(());
        }

        let pool = self
            .threading_policy
            .get_pool(ImageSize::new(new_size.width, new_size.height));

        if self.function == ResamplingFunction::Nearest {
            resize_nearest::<T, N>(
                store.buffer.as_ref(),
                store.width,
                store.height,
                into.buffer.borrow_mut(),
                new_size.width,
                new_size.height,
                &pool,
            );
            return Ok(());
        }

        let should_do_horizontal = store.width != new_size.width;
        let should_do_vertical = store.height != new_size.height;
        assert!(should_do_horizontal || should_do_vertical);

        if should_do_vertical && should_do_horizontal {
            let mut target_vertical = vec![T::default(); store.width * new_size.height * N];

            let mut new_image_vertical = ImageStoreMut::<T, N>::from_slice(
                &mut target_vertical,
                store.width,
                new_size.height,
            )?;
            new_image_vertical.bit_depth = into.bit_depth;
            let vertical_filters = self.generate_weights(store.height, new_size.height);
            let options = ConvolutionOptions::new(self.workload_strategy);
            store.convolve_vertical(vertical_filters, &mut new_image_vertical, &pool, options);

            let new_immutable_store = ImageStore::<T, N> {
                buffer: std::borrow::Cow::Owned(target_vertical),
                channels: N,
                width: store.width,
                height: new_size.height,
                stride: store.width * N,
                bit_depth: into.bit_depth,
            };
            let horizontal_filters = self.generate_weights(store.width, new_size.width);
            let options = ConvolutionOptions::new(self.workload_strategy);
            new_immutable_store.convolve_horizontal(horizontal_filters, into, &pool, options);
            Ok(())
        } else if should_do_vertical {
            let vertical_filters = self.generate_weights(store.height, new_size.height);
            let options = ConvolutionOptions::new(self.workload_strategy);
            store.convolve_vertical(vertical_filters, into, &pool, options);
            Ok(())
        } else {
            assert!(should_do_horizontal);
            let horizontal_filters = self.generate_weights(store.width, new_size.width);
            let options = ConvolutionOptions::new(self.workload_strategy);
            store.convolve_horizontal(horizontal_filters, into, &pool, options);
            Ok(())
        }
    }

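    /// Two-pass (vertical, then horizontal) resize that optionally
    /// premultiplies alpha into a temporary buffer before convolution and
    /// unpremultiplies the destination afterwards.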
    fn forward_resize_with_alpha<
        'a,
        T: Clone + Copy + Debug + Send + Sync + Default + 'static,
        const N: usize,
    >(
        &self,
        store: &ImageStore<'a, T, N>,
        into: &mut ImageStoreMut<'a, T, N>,
        premultiply_alpha_requested: bool,
        pool: &Option<ThreadPool>,
    ) -> Result<(), PicScaleError>
    where
        ImageStore<'a, T, N>:
            VerticalConvolutionPass<T, N> + HorizontalConvolutionPass<T, N> + AssociateAlpha<T, N>,
        ImageStoreMut<'a, T, N>: CheckStoreDensity + UnassociateAlpha<T, N>,
    {
        let new_size = into.get_size();
        let mut src_store: std::borrow::Cow<'_, ImageStore<'_, T, N>> =
            std::borrow::Cow::Borrowed(store);

        // Track whether alpha was actually premultiplied so the destination is
        // only unpremultiplied when needed.
        let mut has_alpha_premultiplied = false;

        if premultiply_alpha_requested {
            let is_alpha_premultiplication_reasonable =
                src_store.is_alpha_premultiplication_needed();
            if is_alpha_premultiplication_reasonable {
                let mut target_premultiplied =
                    vec![T::default(); src_store.width * src_store.height * N];
                let mut new_store = ImageStoreMut::<T, N>::from_slice(
                    &mut target_premultiplied,
                    src_store.width,
                    src_store.height,
                )?;
                new_store.bit_depth = into.bit_depth;
                src_store.premultiply_alpha(&mut new_store, pool);
                src_store = std::borrow::Cow::Owned(ImageStore::<T, N> {
                    buffer: std::borrow::Cow::Owned(target_premultiplied),
                    channels: N,
                    width: src_store.width,
                    height: src_store.height,
                    stride: src_store.width * N,
                    bit_depth: into.bit_depth,
                });
                has_alpha_premultiplied = true;
            }
        }

        let mut target_vertical = vec![T::default(); src_store.width * new_size.height * N];

        let mut new_image_vertical = ImageStoreMut::<T, N>::from_slice(
            &mut target_vertical,
            src_store.width,
            new_size.height,
        )?;
        new_image_vertical.bit_depth = into.bit_depth;
        let vertical_filters = self.generate_weights(src_store.height, new_size.height);
        let options = ConvolutionOptions::new(self.workload_strategy);
        src_store.convolve_vertical(vertical_filters, &mut new_image_vertical, pool, options);

        let new_immutable_store = ImageStore::<T, N> {
            buffer: std::borrow::Cow::Owned(target_vertical),
            channels: N,
            width: src_store.width,
            height: new_size.height,
            stride: src_store.width * N,
            bit_depth: into.bit_depth,
        };
        let horizontal_filters = self.generate_weights(src_store.width, new_size.width);
        let options = ConvolutionOptions::new(self.workload_strategy);
        new_immutable_store.convolve_horizontal(horizontal_filters, into, pool, options);

        if premultiply_alpha_requested && has_alpha_premultiplied {
            into.unpremultiply_alpha(pool);
        }

        Ok(())
    }

    fn forward_resize_vertical_with_alpha<
        'a,
        T: Clone + Copy + Debug + Send + Sync + Default + 'static,
        const N: usize,
    >(
        &self,
        store: &ImageStore<'a, T, N>,
        into: &mut ImageStoreMut<'a, T, N>,
        premultiply_alpha_requested: bool,
        pool: &Option<ThreadPool>,
    ) -> Result<(), PicScaleError>
    where
        ImageStore<'a, T, N>:
            VerticalConvolutionPass<T, N> + HorizontalConvolutionPass<T, N> + AssociateAlpha<T, N>,
        ImageStoreMut<'a, T, N>: CheckStoreDensity + UnassociateAlpha<T, N>,
    {
        let new_size = into.get_size();
        let mut src_store = std::borrow::Cow::Borrowed(store);

        // Track whether alpha was actually premultiplied so the destination is
        // only unpremultiplied when needed.
        let mut has_alpha_premultiplied = false;

        if premultiply_alpha_requested {
            let is_alpha_premultiplication_reasonable =
                src_store.is_alpha_premultiplication_needed();
            if is_alpha_premultiplication_reasonable {
                let mut target_premultiplied =
                    vec![T::default(); src_store.width * src_store.height * N];
                let mut new_store = ImageStoreMut::<T, N>::from_slice(
                    &mut target_premultiplied,
                    src_store.width,
                    src_store.height,
                )?;
                new_store.bit_depth = into.bit_depth;
                src_store.premultiply_alpha(&mut new_store, pool);
                src_store = std::borrow::Cow::Owned(ImageStore::<T, N> {
                    buffer: std::borrow::Cow::Owned(target_premultiplied),
                    channels: N,
                    width: src_store.width,
                    height: src_store.height,
                    stride: src_store.width * N,
                    bit_depth: into.bit_depth,
                });
                has_alpha_premultiplied = true;
            }
        }

        let vertical_filters = self.generate_weights(src_store.height, new_size.height);
        let options = ConvolutionOptions::new(self.workload_strategy);
        src_store.convolve_vertical(vertical_filters, into, pool, options);

        if premultiply_alpha_requested && has_alpha_premultiplied {
            into.unpremultiply_alpha(pool);
        }

        Ok(())
    }

    fn forward_resize_horizontal_with_alpha<
        'a,
        T: Clone + Copy + Debug + Send + Sync + Default + 'static,
        const N: usize,
    >(
        &self,
        store: &ImageStore<'a, T, N>,
        into: &mut ImageStoreMut<'a, T, N>,
        premultiply_alpha_requested: bool,
        pool: &Option<ThreadPool>,
    ) -> Result<(), PicScaleError>
    where
        ImageStore<'a, T, N>:
            VerticalConvolutionPass<T, N> + HorizontalConvolutionPass<T, N> + AssociateAlpha<T, N>,
        ImageStoreMut<'a, T, N>: CheckStoreDensity + UnassociateAlpha<T, N>,
    {
        let new_size = into.get_size();
        let mut src_store = std::borrow::Cow::Borrowed(store);

        // Track whether alpha was actually premultiplied so the destination is
        // only unpremultiplied when needed.
        let mut has_alpha_premultiplied = false;

        if premultiply_alpha_requested {
            let is_alpha_premultiplication_reasonable =
                src_store.is_alpha_premultiplication_needed();
            if is_alpha_premultiplication_reasonable {
                let mut target_premultiplied =
                    vec![T::default(); src_store.width * src_store.height * N];
                let mut new_store = ImageStoreMut::<T, N>::from_slice(
                    &mut target_premultiplied,
                    src_store.width,
                    src_store.height,
                )?;
                new_store.bit_depth = into.bit_depth;
                src_store.premultiply_alpha(&mut new_store, pool);
                src_store = std::borrow::Cow::Owned(ImageStore::<T, N> {
                    buffer: std::borrow::Cow::Owned(target_premultiplied),
                    channels: N,
                    width: src_store.width,
                    height: src_store.height,
                    stride: src_store.width * N,
                    bit_depth: into.bit_depth,
                });
                has_alpha_premultiplied = true;
            }
        }

        let horizontal_filters = self.generate_weights(src_store.width, new_size.width);
        let options = ConvolutionOptions::new(self.workload_strategy);
        src_store.convolve_horizontal(horizontal_filters, into, pool, options);

        if premultiply_alpha_requested && has_alpha_premultiplied {
            into.unpremultiply_alpha(pool);
        }

        Ok(())
    }

    pub(crate) fn generic_resize_with_alpha<
        'a,
        T: Clone + Copy + Debug + Send + Sync + Default + 'static,
        const N: usize,
    >(
        &self,
        store: &ImageStore<'a, T, N>,
        into: &mut ImageStoreMut<'a, T, N>,
        premultiply_alpha_requested: bool,
    ) -> Result<(), PicScaleError>
    where
        ImageStore<'a, T, N>:
            VerticalConvolutionPass<T, N> + HorizontalConvolutionPass<T, N> + AssociateAlpha<T, N>,
        ImageStoreMut<'a, T, N>: CheckStoreDensity + UnassociateAlpha<T, N>,
    {
        let new_size = into.get_size();
        into.validate()?;
        store.validate()?;
        if store.width == 0 || store.height == 0 || new_size.width == 0 || new_size.height == 0 {
            return Err(PicScaleError::ZeroImageDimensions);
        }

        if check_image_size_overflow(store.width, store.height, store.channels) {
            return Err(PicScaleError::SourceImageIsTooLarge);
        }

        if check_image_size_overflow(new_size.width, new_size.height, store.channels) {
            return Err(PicScaleError::DestinationImageIsTooLarge);
        }

        if into.should_have_bit_depth() && !(1..=16).contains(&into.bit_depth) {
            return Err(PicScaleError::UnsupportedBitDepth(into.bit_depth));
        }

        if store.width == new_size.width && store.height == new_size.height {
            store.copied_to_mut(into);
            return Ok(());
        }

        let pool = self
            .threading_policy
            .get_pool(ImageSize::new(new_size.width, new_size.height));

        if self.function == ResamplingFunction::Nearest {
            resize_nearest::<T, N>(
                store.buffer.as_ref(),
                store.width,
                store.height,
                into.buffer.borrow_mut(),
                new_size.width,
                new_size.height,
                &pool,
            );
            return Ok(());
        }

        let should_do_horizontal = store.width != new_size.width;
        let should_do_vertical = store.height != new_size.height;
        assert!(should_do_horizontal || should_do_vertical);

        if should_do_vertical && should_do_horizontal {
            self.forward_resize_with_alpha(store, into, premultiply_alpha_requested, &pool)
        } else if should_do_vertical {
            self.forward_resize_vertical_with_alpha(store, into, premultiply_alpha_requested, &pool)
        } else {
            assert!(should_do_horizontal);
            self.forward_resize_horizontal_with_alpha(
                store,
                into,
                premultiply_alpha_requested,
                &pool,
            )
        }
    }
}

impl Scaling for Scaler {
    fn set_threading_policy(&mut self, threading_policy: ThreadingPolicy) {
        self.threading_policy = threading_policy;
    }

    fn resize_cbcr8<'a>(
        &'a self,
        store: &ImageStore<'a, u8, 2>,
        into: &mut ImageStoreMut<'a, u8, 2>,
    ) -> Result<(), PicScaleError> {
        self.generic_resize(store, into)
    }

    fn resize_rgb<'a>(
        &'a self,
        store: &ImageStore<'a, u8, 3>,
        into: &mut ImageStoreMut<'a, u8, 3>,
    ) -> Result<(), PicScaleError> {
        self.generic_resize(store, into)
    }

    fn resize_rgba<'a>(
        &'a self,
        store: &ImageStore<'a, u8, 4>,
        into: &mut ImageStoreMut<'a, u8, 4>,
        premultiply_alpha: bool,
    ) -> Result<(), PicScaleError> {
        self.generic_resize_with_alpha(store, into, premultiply_alpha)
    }
}

impl ScalingF32 for Scaler {
    fn resize_cbcr_f32<'a>(
        &'a self,
        store: &ImageStore<'a, f32, 2>,
        into: &mut ImageStoreMut<'a, f32, 2>,
    ) -> Result<(), PicScaleError> {
        self.generic_resize(store, into)
    }

    fn resize_rgb_f32<'a>(
        &'a self,
        store: &ImageStore<'a, f32, 3>,
        into: &mut ImageStoreMut<'a, f32, 3>,
    ) -> Result<(), PicScaleError> {
        self.generic_resize(store, into)
    }

    fn resize_rgba_f32<'a>(
        &'a self,
        store: &ImageStore<'a, f32, 4>,
        into: &mut ImageStoreMut<'a, f32, 4>,
        premultiply_alpha: bool,
    ) -> Result<(), PicScaleError> {
        self.generic_resize_with_alpha(store, into, premultiply_alpha)
    }
}

impl Scaler {
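    /// Resizes a single-channel `f32` image.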
    pub fn resize_plane_f32<'a>(
        &'a self,
        store: &ImageStore<'a, f32, 1>,
        into: &mut ImageStoreMut<'a, f32, 1>,
    ) -> Result<(), PicScaleError> {
        self.generic_resize(store, into)
    }
}

impl Scaler {
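    /// Resizes a single-channel 8-bit (planar) image.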
    pub fn resize_plane<'a>(
        &'a self,
        store: &ImageStore<'a, u8, 1>,
        into: &mut ImageStoreMut<'a, u8, 1>,
    ) -> Result<(), PicScaleError> {
        self.generic_resize(store, into)
    }
}

impl ScalingU16 for Scaler {
    fn resize_rgb_u16<'a>(
        &'a self,
        store: &ImageStore<'a, u16, 3>,
        into: &mut ImageStoreMut<'a, u16, 3>,
    ) -> Result<(), PicScaleError> {
        self.generic_resize(store, into)
    }

    fn resize_cbcr_u16<'a>(
        &'a self,
        store: &ImageStore<'a, u16, 2>,
        into: &mut ImageStoreMut<'a, u16, 2>,
    ) -> Result<(), PicScaleError> {
        self.generic_resize(store, into)
    }

    fn resize_rgba_u16<'a>(
        &'a self,
        store: &ImageStore<'a, u16, 4>,
        into: &mut ImageStoreMut<'a, u16, 4>,
        premultiply_alpha: bool,
    ) -> Result<(), PicScaleError> {
        self.generic_resize_with_alpha(store, into, premultiply_alpha)
    }

    fn resize_plane_u16<'a>(
        &'a self,
        store: &ImageStore<'a, u16, 1>,
        into: &mut ImageStoreMut<'a, u16, 1>,
    ) -> Result<(), PicScaleError> {
        self.generic_resize(store, into)
    }
}

impl Scaler {
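    /// Resizes a packed 30-bit AR30 (2-bit alpha, 10 bits per colour channel)
    /// image supplied as raw bytes.
    ///
    /// A minimal sketch; the strides are assumed to be in bytes (4 bytes per
    /// packed pixel) and the buffers are illustrative:
    ///
    /// ```ignore
    /// let src = vec![0u8; 64 * 64 * 4];
    /// let mut dst = vec![0u8; 32 * 32 * 4];
    /// let scaler = Scaler::new(ResamplingFunction::Bilinear);
    /// scaler.resize_ar30(
    ///     &src, 64 * 4, ImageSize::new(64, 64),
    ///     &mut dst, 32 * 4, ImageSize::new(32, 32),
    ///     Ar30ByteOrder::Host,
    /// )?;
    /// ```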
    pub fn resize_ar30(
        &self,
        src: &[u8],
        src_stride: usize,
        src_size: ImageSize,
        dst: &mut [u8],
        dst_stride: usize,
        new_size: ImageSize,
        order: Ar30ByteOrder,
    ) -> Result<(), PicScaleError> {
        match order {
            Ar30ByteOrder::Host => {
                resize_ar30_impl::<{ Rgb30::Ar30 as usize }, { Ar30ByteOrder::Host as usize }>(
                    src, src_stride, src_size, dst, dst_stride, new_size, self,
                )
            }
            Ar30ByteOrder::Network => {
                resize_ar30_impl::<{ Rgb30::Ar30 as usize }, { Ar30ByteOrder::Network as usize }>(
                    src, src_stride, src_size, dst, dst_stride, new_size, self,
                )
            }
        }
    }

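    /// Resizes a packed 30-bit RA30 image supplied as raw bytes; same calling
    /// convention as [`Scaler::resize_ar30`].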
    pub fn resize_ra30(
        &self,
        src: &[u8],
        src_stride: usize,
        src_size: ImageSize,
        dst: &mut [u8],
        dst_stride: usize,
        new_size: ImageSize,
        order: Ar30ByteOrder,
    ) -> Result<(), PicScaleError> {
        match order {
            Ar30ByteOrder::Host => {
                resize_ar30_impl::<{ Rgb30::Ra30 as usize }, { Ar30ByteOrder::Host as usize }>(
                    src, src_stride, src_size, dst, dst_stride, new_size, self,
                )
            }
            Ar30ByteOrder::Network => {
                resize_ar30_impl::<{ Rgb30::Ra30 as usize }, { Ar30ByteOrder::Network as usize }>(
                    src, src_stride, src_size, dst, dst_stride, new_size, self,
                )
            }
        }
    }
}

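/// Options consumed by the [`ImageStoreScaling`] trait: the resampling
/// function, whether alpha should be premultiplied for alpha-carrying stores,
/// and whether an adaptive thread pool may be used.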
#[derive(Debug, Clone, Copy, Ord, PartialOrd, Eq, PartialEq, Default)]
pub struct ScalingOptions {
    pub resampling_function: ResamplingFunction,
    pub premultiply_alpha: bool,
    pub use_multithreading: bool,
}

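/// Resizing driven from the source store itself, configured with
/// [`ScalingOptions`].
///
/// A minimal sketch, assuming `Rgba8ImageStore` is the RGBA8 store alias
/// re-exported from the crate root (see the imports at the top of this file)
/// and that `pixels` is any RGBA byte buffer:
///
/// ```ignore
/// let src = Rgba8ImageStore::from_slice(&pixels, 640, 480)?;
/// let mut dst = ImageStoreMut::<u8, 4>::alloc(320, 240);
/// src.scale(&mut dst, ScalingOptions::default())?;
/// ```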
pub trait ImageStoreScaling<'b, T, const N: usize>
where
    T: Clone + Copy + Debug,
{
    fn scale(
        &self,
        store: &mut ImageStoreMut<'b, T, N>,
        options: ScalingOptions,
    ) -> Result<(), PicScaleError>;
}

impl<'b> ImageStoreScaling<'b, u8, 4> for Rgba8ImageStore<'b> {
    fn scale(
        &self,
        store: &mut ImageStoreMut<'b, u8, 4>,
        options: ScalingOptions,
    ) -> Result<(), PicScaleError> {
        let mut scaler = Scaler::new(options.resampling_function);
        scaler.set_threading_policy(if options.use_multithreading {
            ThreadingPolicy::Adaptive
        } else {
            ThreadingPolicy::Single
        });
        scaler.generic_resize_with_alpha(self, store, options.premultiply_alpha)
    }
}

impl<'b> ImageStoreScaling<'b, u8, 3> for Rgb8ImageStore<'b> {
    fn scale(
        &self,
        store: &mut ImageStoreMut<'b, u8, 3>,
        options: ScalingOptions,
    ) -> Result<(), PicScaleError> {
        let mut scaler = Scaler::new(options.resampling_function);
        scaler.set_threading_policy(if options.use_multithreading {
            ThreadingPolicy::Adaptive
        } else {
            ThreadingPolicy::Single
        });
        scaler.generic_resize(self, store)
    }
}

impl<'b> ImageStoreScaling<'b, u8, 2> for CbCr8ImageStore<'b> {
    fn scale(
        &self,
        store: &mut ImageStoreMut<'b, u8, 2>,
        options: ScalingOptions,
    ) -> Result<(), PicScaleError> {
        let mut scaler = Scaler::new(options.resampling_function);
        scaler.set_threading_policy(if options.use_multithreading {
            ThreadingPolicy::Adaptive
        } else {
            ThreadingPolicy::Single
        });
        scaler.generic_resize(self, store)
    }
}

impl<'b> ImageStoreScaling<'b, u8, 1> for Planar8ImageStore<'b> {
    fn scale(
        &self,
        store: &mut ImageStoreMut<'b, u8, 1>,
        options: ScalingOptions,
    ) -> Result<(), PicScaleError> {
        let mut scaler = Scaler::new(options.resampling_function);
        scaler.set_threading_policy(if options.use_multithreading {
            ThreadingPolicy::Adaptive
        } else {
            ThreadingPolicy::Single
        });
        scaler.generic_resize(self, store)
    }
}

impl<'b> ImageStoreScaling<'b, u16, 1> for Planar16ImageStore<'b> {
    fn scale(
        &self,
        store: &mut ImageStoreMut<'b, u16, 1>,
        options: ScalingOptions,
    ) -> Result<(), PicScaleError> {
        let mut scaler = Scaler::new(options.resampling_function);
        scaler.set_threading_policy(if options.use_multithreading {
            ThreadingPolicy::Adaptive
        } else {
            ThreadingPolicy::Single
        });
        scaler.generic_resize(self, store)
    }
}

impl<'b> ImageStoreScaling<'b, u16, 2> for CbCr16ImageStore<'b> {
    fn scale(
        &self,
        store: &mut ImageStoreMut<'b, u16, 2>,
        options: ScalingOptions,
    ) -> Result<(), PicScaleError> {
        let mut scaler = Scaler::new(options.resampling_function);
        scaler.set_threading_policy(if options.use_multithreading {
            ThreadingPolicy::Adaptive
        } else {
            ThreadingPolicy::Single
        });
        scaler.generic_resize(self, store)
    }
}

impl<'b> ImageStoreScaling<'b, u16, 3> for Rgb16ImageStore<'b> {
    fn scale(
        &self,
        store: &mut ImageStoreMut<'b, u16, 3>,
        options: ScalingOptions,
    ) -> Result<(), PicScaleError> {
        let mut scaler = Scaler::new(options.resampling_function);
        scaler.set_threading_policy(if options.use_multithreading {
            ThreadingPolicy::Adaptive
        } else {
            ThreadingPolicy::Single
        });
        scaler.generic_resize(self, store)
    }
}

impl<'b> ImageStoreScaling<'b, u16, 4> for Rgba16ImageStore<'b> {
    fn scale(
        &self,
        store: &mut ImageStoreMut<'b, u16, 4>,
        options: ScalingOptions,
    ) -> Result<(), PicScaleError> {
        let mut scaler = Scaler::new(options.resampling_function);
        scaler.set_threading_policy(if options.use_multithreading {
            ThreadingPolicy::Adaptive
        } else {
            ThreadingPolicy::Single
        });
        scaler.generic_resize_with_alpha(self, store, options.premultiply_alpha)
    }
}

impl<'b> ImageStoreScaling<'b, f32, 1> for PlanarF32ImageStore<'b> {
    fn scale(
        &self,
        store: &mut ImageStoreMut<'b, f32, 1>,
        options: ScalingOptions,
    ) -> Result<(), PicScaleError> {
        let mut scaler = Scaler::new(options.resampling_function);
        scaler.set_threading_policy(if options.use_multithreading {
            ThreadingPolicy::Adaptive
        } else {
            ThreadingPolicy::Single
        });
        scaler.generic_resize(self, store)
    }
}

impl<'b> ImageStoreScaling<'b, f32, 2> for CbCrF32ImageStore<'b> {
    fn scale(
        &self,
        store: &mut ImageStoreMut<'b, f32, 2>,
        options: ScalingOptions,
    ) -> Result<(), PicScaleError> {
        let mut scaler = Scaler::new(options.resampling_function);
        scaler.set_threading_policy(if options.use_multithreading {
            ThreadingPolicy::Adaptive
        } else {
            ThreadingPolicy::Single
        });
        scaler.generic_resize(self, store)
    }
}

impl<'b> ImageStoreScaling<'b, f32, 3> for RgbF32ImageStore<'b> {
    fn scale(
        &self,
        store: &mut ImageStoreMut<'b, f32, 3>,
        options: ScalingOptions,
    ) -> Result<(), PicScaleError> {
        let mut scaler = Scaler::new(options.resampling_function);
        scaler.set_threading_policy(if options.use_multithreading {
            ThreadingPolicy::Adaptive
        } else {
            ThreadingPolicy::Single
        });
        scaler.generic_resize(self, store)
    }
}

impl<'b> ImageStoreScaling<'b, f32, 4> for RgbaF32ImageStore<'b> {
    fn scale(
        &self,
        store: &mut ImageStoreMut<'b, f32, 4>,
        options: ScalingOptions,
    ) -> Result<(), PicScaleError> {
        let mut scaler = Scaler::new(options.resampling_function);
        scaler.set_threading_policy(if options.use_multithreading {
            ThreadingPolicy::Adaptive
        } else {
            ThreadingPolicy::Single
        });
        scaler.generic_resize_with_alpha(self, store, options.premultiply_alpha)
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn check_rgba8_resizing_vertical() {
        let image_width = 255;
        let image_height = 512;
        const CN: usize = 4;
        let mut image = vec![0u8; image_height * image_width * CN];
        image[image_width * CN * (image_height.div_ceil(2)) + (image_width - 1) * CN] = 174;
        let mut scaler = Scaler::new(ResamplingFunction::Bilinear);
        scaler.set_threading_policy(ThreadingPolicy::Single);
        let src_store = ImageStore::from_slice(&image, image_width, image_height).unwrap();
        let mut target_store = ImageStoreMut::alloc(image_width, image_height / 2);
        scaler
            .resize_rgba(&src_store, &mut target_store, false)
            .unwrap();
        let target_data = target_store.buffer.borrow();

        let resized = target_data
            [image_width * CN * ((image_height / 2).div_ceil(2)) + (image_width - 1) * CN];
        assert_ne!(resized, 0);
    }

    #[test]
    fn check_rgba8_resizing_both() {
        let image_width = 255;
        let image_height = 512;
        const CN: usize = 4;
        let mut image = vec![0u8; image_height * image_width * CN];
        image[0] = 174;
        let mut scaler = Scaler::new(ResamplingFunction::Bilinear);
        scaler.set_threading_policy(ThreadingPolicy::Single);
        let src_store = ImageStore::from_slice(&image, image_width, image_height).unwrap();
        let mut target_store = ImageStoreMut::alloc(image_width / 2, image_height / 2);
        scaler
            .resize_rgba(&src_store, &mut target_store, false)
            .unwrap();
        let target_data = target_store.buffer.borrow();

        let resized = target_data[0];
        assert_ne!(resized, 0);
    }

    #[test]
    fn check_rgba8_resizing_alpha() {
        let image_width = 255;
        let image_height = 512;
        const CN: usize = 4;
        let mut image = vec![0u8; image_height * image_width * CN];
        image[0] = 174;
        image[7] = 1;
        let mut scaler = Scaler::new(ResamplingFunction::Bilinear);
        scaler.set_threading_policy(ThreadingPolicy::Single);
        let src_store = ImageStore::from_slice(&image, image_width, image_height).unwrap();
        let mut target_store = ImageStoreMut::alloc(image_width / 2, image_height / 2);
        scaler
            .resize_rgba(&src_store, &mut target_store, true)
            .unwrap();
        let target_data = target_store.buffer.borrow();

        let resized = target_data[0];
        assert_eq!(resized, 0);
    }

    #[test]
    fn check_rgb8_resizing_vertical() {
        let image_width = 255;
        let image_height = 512;
        const CN: usize = 3;
        let mut image = vec![0u8; image_height * image_width * CN];
        image[image_width * CN * (image_height.div_ceil(2)) + (image_width - 1) * CN] = 174;
        let mut scaler = Scaler::new(ResamplingFunction::Bilinear);
        scaler.set_threading_policy(ThreadingPolicy::Single);
        let src_store = ImageStore::from_slice(&image, image_width, image_height).unwrap();
        let mut target_store = ImageStoreMut::alloc(image_width, image_height / 2);
        scaler.resize_rgb(&src_store, &mut target_store).unwrap();
        let target_data = target_store.buffer.borrow();

        let resized = target_data
            [image_width * CN * ((image_height / 2).div_ceil(2)) + (image_width - 1) * CN];
        assert_ne!(resized, 0);
    }

    #[test]
    fn check_rgba10_resizing_vertical() {
        let image_width = 8;
        let image_height = 8;
        const CN: usize = 4;
        let mut image = vec![0u16; image_height * image_width * CN];
        image[image_width * CN * (image_height.div_ceil(2)) + (image_width - 1) * CN] = 174;
        let mut scaler = Scaler::new(ResamplingFunction::Lanczos3);
        scaler.set_threading_policy(ThreadingPolicy::Single);
        let mut src_store = ImageStore::from_slice(&image, image_width, image_height).unwrap();
        src_store.bit_depth = 10;
        let mut target_store = ImageStoreMut::alloc_with_depth(image_width, image_height / 2, 10);
        scaler
            .resize_rgba_u16(&src_store, &mut target_store, false)
            .unwrap();
        let target_data = target_store.buffer.borrow();

        let resized = target_data
            [image_width * CN * ((image_height / 2).div_ceil(2)) + (image_width - 1) * CN];
        assert_ne!(resized, 0);
    }

    #[test]
    fn check_rgb10_resizing_vertical() {
        let image_width = 8;
        let image_height = 4;
        const CN: usize = 3;
        let mut image = vec![0; image_height * image_width * CN];
        image[0] = 174;
        let mut scaler = Scaler::new(ResamplingFunction::Lanczos3);
        scaler.set_threading_policy(ThreadingPolicy::Single);
        let mut src_store = ImageStore::from_slice(&image, image_width, image_height).unwrap();
        src_store.bit_depth = 10;
        let mut target_store = ImageStoreMut::alloc_with_depth(image_width, image_height / 2, 10);
        scaler
            .resize_rgb_u16(&src_store, &mut target_store)
            .unwrap();
        let target_data = target_store.buffer.borrow();

        let resized = target_data[0];
        assert_ne!(resized, 0);
    }

    #[test]
    fn check_rgb16_resizing_vertical() {
        let image_width = 8;
        let image_height = 8;
        const CN: usize = 3;
        let mut image = vec![164; image_height * image_width * CN];
        image[0] = 174;
        let mut scaler = Scaler::new(ResamplingFunction::Lanczos3);
        scaler.set_threading_policy(ThreadingPolicy::Single);
        let mut src_store = ImageStore::from_slice(&image, image_width, image_height).unwrap();
        src_store.bit_depth = 10;
        let mut target_store = ImageStoreMut::alloc_with_depth(image_width, image_height / 2, 16);
        scaler
            .resize_rgb_u16(&src_store, &mut target_store)
            .unwrap();
        let target_data = target_store.buffer.borrow();

        let resized = target_data[0];
        assert_ne!(resized, 0);
    }

    #[test]
    fn check_rgba16_resizing_vertical() {
        let image_width = 8;
        let image_height = 8;
        const CN: usize = 4;
        let mut image = vec![0u16; image_height * image_width * CN];
        image[image_width * CN * (image_height.div_ceil(2)) + (image_width - 1) * CN] = 174;
        let mut scaler = Scaler::new(ResamplingFunction::Lanczos3);
        scaler.set_threading_policy(ThreadingPolicy::Single);
        let mut src_store = ImageStore::from_slice(&image, image_width, image_height).unwrap();
        src_store.bit_depth = 10;
        let mut target_store = ImageStoreMut::alloc_with_depth(image_width, image_height / 2, 16);
        scaler
            .resize_rgba_u16(&src_store, &mut target_store, false)
            .unwrap();
        let target_data = target_store.buffer.borrow();

        let resized = target_data
            [image_width * CN * ((image_height / 2).div_ceil(2)) + (image_width - 1) * CN];
        assert_ne!(resized, 0);
    }

    #[test]
    fn check_rgba8_nearest_vertical() {
        let image_width = 255;
        let image_height = 512;
        const CN: usize = 4;
        let mut image = vec![0u8; image_height * image_width * CN];
        image[image_width * CN * (image_height.div_ceil(2)) + (image_width - 1) * CN] = 174;
        let mut scaler = Scaler::new(ResamplingFunction::Nearest);
        scaler.set_threading_policy(ThreadingPolicy::Single);
        let src_store = ImageStore::from_slice(&image, image_width, image_height).unwrap();
        let mut target_store = ImageStoreMut::alloc(image_width, image_height / 2);
        scaler
            .resize_rgba(&src_store, &mut target_store, false)
            .unwrap();
        let target_data = target_store.buffer.borrow();

        let resized = target_data
            [image_width * CN * ((image_height / 2).div_ceil(2)) + (image_width - 1) * CN];
        assert_eq!(resized, 174);
    }
}