1#![forbid(unsafe_code)]
30use crate::ar30::{Ar30ByteOrder, Rgb30};
31use crate::convolution::{ConvolutionOptions, HorizontalConvolutionPass, VerticalConvolutionPass};
32use crate::filter_weights::{FilterBounds, FilterWeights};
33use crate::image_size::ImageSize;
34use crate::image_store::{
35 AssociateAlpha, CheckStoreDensity, ImageStore, ImageStoreMut, UnassociateAlpha,
36};
37use crate::nearest_sampler::resize_nearest;
38use crate::pic_scale_error::PicScaleError;
39use crate::resize_ar30::resize_ar30_impl;
40use crate::support::check_image_size_overflow;
41use crate::threading_policy::ThreadingPolicy;
42use crate::{
43 CbCr8ImageStore, CbCr16ImageStore, CbCrF32ImageStore, ConstPI, ConstSqrt2, Jinc,
44 Planar8ImageStore, Planar16ImageStore, PlanarF32ImageStore, ResamplingFunction, Rgb8ImageStore,
45 Rgb16ImageStore, RgbF32ImageStore, Rgba8ImageStore, Rgba16ImageStore, RgbaF32ImageStore,
46};
47use num_traits::{AsPrimitive, Float, Signed};
48use rayon::ThreadPool;
49use std::fmt::Debug;
50use std::ops::{AddAssign, MulAssign, Neg};
51
/// Image rescaling entry point.
///
/// Holds the selected resampling kernel together with execution options;
/// construct with [`Scaler::new`] and configure via
/// [`Scaler::set_workload_strategy`] / the [`Scaling`] trait.
#[derive(Debug, Copy, Clone)]
pub struct Scaler {
    // Resampling kernel used to generate convolution weights.
    pub(crate) function: ResamplingFunction,
    // Whether work runs single-threaded or on a rayon pool.
    pub(crate) threading_policy: ThreadingPolicy,
    /// Quality/speed trade-off applied to the convolution passes.
    pub workload_strategy: WorkloadStrategy,
}
59
/// 8-bit resizing entry points.
pub trait Scaling {
    /// Sets the threading policy used by subsequent resize calls.
    fn set_threading_policy(&mut self, threading_policy: ThreadingPolicy);

    /// Resizes a single-channel (planar) 8-bit image.
    fn resize_plane<'a>(
        &'a self,
        store: &ImageStore<'a, u8, 1>,
        into: &mut ImageStoreMut<'a, u8, 1>,
    ) -> Result<(), PicScaleError>;

    /// Resizes a two-channel 8-bit image (e.g. interleaved CbCr).
    fn resize_cbcr8<'a>(
        &'a self,
        store: &ImageStore<'a, u8, 2>,
        into: &mut ImageStoreMut<'a, u8, 2>,
    ) -> Result<(), PicScaleError>;

    /// Resizes a gray+alpha 8-bit image.
    ///
    /// `premultiply_alpha` requests associating color with alpha before
    /// scaling and unassociating afterwards.
    fn resize_gray_alpha<'a>(
        &'a self,
        store: &ImageStore<'a, u8, 2>,
        into: &mut ImageStoreMut<'a, u8, 2>,
        premultiply_alpha: bool,
    ) -> Result<(), PicScaleError>;

    /// Resizes a three-channel (RGB) 8-bit image.
    fn resize_rgb<'a>(
        &'a self,
        store: &ImageStore<'a, u8, 3>,
        into: &mut ImageStoreMut<'a, u8, 3>,
    ) -> Result<(), PicScaleError>;

    /// Resizes a four-channel (RGBA) 8-bit image.
    ///
    /// `premultiply_alpha` requests associating color with alpha before
    /// scaling and unassociating afterwards.
    fn resize_rgba<'a>(
        &'a self,
        store: &ImageStore<'a, u8, 4>,
        into: &mut ImageStoreMut<'a, u8, 4>,
        premultiply_alpha: bool,
    ) -> Result<(), PicScaleError>;
}
175
/// `f32` resizing entry points; mirrors [`Scaling`] for float stores.
pub trait ScalingF32 {
    /// Resizes a single-channel (planar) `f32` image.
    fn resize_plane_f32<'a>(
        &'a self,
        store: &ImageStore<'a, f32, 1>,
        into: &mut ImageStoreMut<'a, f32, 1>,
    ) -> Result<(), PicScaleError>;

    /// Resizes a two-channel `f32` image (e.g. interleaved CbCr).
    fn resize_cbcr_f32<'a>(
        &'a self,
        store: &ImageStore<'a, f32, 2>,
        into: &mut ImageStoreMut<'a, f32, 2>,
    ) -> Result<(), PicScaleError>;

    /// Resizes a gray+alpha `f32` image; see [`Scaling::resize_gray_alpha`]
    /// for the meaning of `premultiply_alpha`.
    fn resize_gray_alpha_f32<'a>(
        &'a self,
        store: &ImageStore<'a, f32, 2>,
        into: &mut ImageStoreMut<'a, f32, 2>,
        premultiply_alpha: bool,
    ) -> Result<(), PicScaleError>;

    /// Resizes a three-channel (RGB) `f32` image.
    fn resize_rgb_f32<'a>(
        &'a self,
        store: &ImageStore<'a, f32, 3>,
        into: &mut ImageStoreMut<'a, f32, 3>,
    ) -> Result<(), PicScaleError>;

    /// Resizes a four-channel (RGBA) `f32` image; see
    /// [`Scaling::resize_rgba`] for the meaning of `premultiply_alpha`.
    fn resize_rgba_f32<'a>(
        &'a self,
        store: &ImageStore<'a, f32, 4>,
        into: &mut ImageStoreMut<'a, f32, 4>,
        premultiply_alpha: bool,
    ) -> Result<(), PicScaleError>;
}
279
/// Quality/speed trade-off applied by the convolution passes.
#[derive(Debug, Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Default)]
pub enum WorkloadStrategy {
    /// Favor output quality over throughput.
    PreferQuality,
    /// Favor throughput over output quality (the default).
    #[default]
    PreferSpeed,
}
289
/// `u16` resizing entry points; mirrors [`Scaling`] for high-bit-depth
/// stores. The destination's `bit_depth` is validated by the drivers
/// (accepted range 1..=16).
pub trait ScalingU16 {
    /// Resizes a single-channel (planar) `u16` image.
    fn resize_plane_u16<'a>(
        &'a self,
        store: &ImageStore<'a, u16, 1>,
        into: &mut ImageStoreMut<'a, u16, 1>,
    ) -> Result<(), PicScaleError>;

    /// Resizes a two-channel `u16` image (e.g. interleaved CbCr).
    fn resize_cbcr_u16<'a>(
        &'a self,
        store: &ImageStore<'a, u16, 2>,
        into: &mut ImageStoreMut<'a, u16, 2>,
    ) -> Result<(), PicScaleError>;

    /// Resizes a gray+alpha `u16` image; see [`Scaling::resize_gray_alpha`]
    /// for the meaning of `premultiply_alpha`.
    fn resize_gray_alpha16<'a>(
        &'a self,
        store: &ImageStore<'a, u16, 2>,
        into: &mut ImageStoreMut<'a, u16, 2>,
        premultiply_alpha: bool,
    ) -> Result<(), PicScaleError>;

    /// Resizes a three-channel (RGB) `u16` image.
    fn resize_rgb_u16<'a>(
        &'a self,
        store: &ImageStore<'a, u16, 3>,
        into: &mut ImageStoreMut<'a, u16, 3>,
    ) -> Result<(), PicScaleError>;

    /// Resizes a four-channel (RGBA) `u16` image; see
    /// [`Scaling::resize_rgba`] for the meaning of `premultiply_alpha`.
    fn resize_rgba_u16<'a>(
        &'a self,
        store: &ImageStore<'a, u16, 4>,
        into: &mut ImageStoreMut<'a, u16, 4>,
        premultiply_alpha: bool,
    ) -> Result<(), PicScaleError>;
}
438
impl Scaler {
    /// Creates a scaler using `filter` as the resampling kernel.
    ///
    /// Defaults to single-threaded execution and the default
    /// [`WorkloadStrategy`].
    pub fn new(filter: ResamplingFunction) -> Self {
        Scaler {
            function: filter,
            threading_policy: ThreadingPolicy::Single,
            workload_strategy: WorkloadStrategy::default(),
        }
    }

    /// Sets the quality/speed trade-off used by the convolution passes.
    pub fn set_workload_strategy(&mut self, workload_strategy: WorkloadStrategy) {
        self.workload_strategy = workload_strategy;
    }

    /// Computes normalized convolution weights and per-output-pixel source
    /// bounds for resampling `in_size` samples to `out_size` samples along
    /// one axis.
    ///
    /// Two paths exist:
    /// * the general windowed-kernel path — evaluates the selected kernel
    ///   (optionally modulated by a window function) over a kernel whose
    ///   size grows with the downscale factor;
    /// * a two-tap "area" path — taken when the filter is flagged `is_area`
    ///   and `scale < 1`.
    ///
    /// Weights for each output pixel are normalized so they sum to 1.
    pub(crate) fn generate_weights<T>(&self, in_size: usize, out_size: usize) -> FilterWeights<T>
    where
        T: Copy
            + Neg
            + Signed
            + Float
            + 'static
            + ConstPI
            + MulAssign<T>
            + AddAssign<T>
            + AsPrimitive<f64>
            + AsPrimitive<usize>
            + Jinc<T>
            + ConstSqrt2
            + Default
            + AsPrimitive<i32>,
        f32: AsPrimitive<T>,
        f64: AsPrimitive<T>,
        i64: AsPrimitive<T>,
        i32: AsPrimitive<T>,
        usize: AsPrimitive<T>,
    {
        let resampling_filter = self.function.get_resampling_filter();
        // scale > 1 when shrinking (in > out), < 1 when enlarging.
        let scale = in_size.as_() / out_size.as_();
        let is_resizable_kernel = resampling_filter.is_resizable_kernel;
        // Resizable kernels widen with the downscale factor; fixed kernels
        // always use their base footprint.
        let filter_scale_cutoff = match is_resizable_kernel {
            true => scale.max(1f32.as_()),
            false => 1f32.as_(),
        };
        let filter_base_size = resampling_filter.min_kernel_size;
        let resampling_function = resampling_filter.kernel;

        let is_area = resampling_filter.is_area && scale < 1.as_();

        // One (start, size) bound per output pixel.
        let mut bounds: Vec<FilterBounds> = vec![FilterBounds::new(0, 0); out_size];

        if !is_area {
            // General path: sample the kernel (and optional window) around
            // each output pixel's source-space center.
            let window_func = resampling_filter.window;
            let base_size: usize = (filter_base_size.as_() * filter_scale_cutoff).round().as_();
            let kernel_size = base_size;
            let filter_radius = base_size.as_() / 2.as_();
            let filter_scale = 1f32.as_() / filter_scale_cutoff;
            // Flat weight storage: `kernel_size` slots per output pixel.
            let mut weights: Vec<T> = vec![T::default(); kernel_size * out_size];
            let mut local_filters = vec![T::default(); kernel_size];
            let mut filter_position = 0usize;
            let blur_scale = match window_func {
                None => 1f32.as_(),
                Some(window) => {
                    if window.blur.as_() > 0f32.as_() {
                        1f32.as_() / window.blur.as_()
                    } else {
                        0f32.as_()
                    }
                }
            };
            for (i, bound) in bounds.iter_mut().enumerate() {
                // Source-space center of output pixel `i` (pixel-center
                // convention), clamped to the source extent.
                let center_x = ((i.as_() + 0.5.as_()) * scale).min(in_size.as_());
                let mut weights_sum: T = 0f32.as_();
                let mut local_filter_iteration = 0usize;

                // Source tap range covered by this output pixel.
                let start: usize = (center_x - filter_radius).floor().max(0f32.as_()).as_();
                let end: usize = (center_x + filter_radius)
                    .ceil()
                    .min(start.as_() + kernel_size.as_())
                    .min(in_size.as_())
                    .as_();

                let center = center_x - 0.5.as_();

                for (k, filter) in (start..end).zip(local_filters.iter_mut()) {
                    let dx = k.as_() - center;
                    let weight;
                    if let Some(resampling_window) = window_func {
                        let mut x = dx.abs();
                        x = if resampling_window.blur.as_() > 0f32.as_() {
                            x * blur_scale
                        } else {
                            x
                        };
                        // Taper flattens the response near the center before
                        // the window is applied.
                        x = if x <= resampling_window.taper.as_() {
                            0f32.as_()
                        } else {
                            (x - resampling_window.taper.as_())
                                / (1f32.as_() - resampling_window.taper.as_())
                        };
                        let window_producer = resampling_window.window;
                        let x_kernel_scaled = x * filter_scale;
                        let window = if x < resampling_window.window_size.as_() {
                            window_producer(x_kernel_scaled * resampling_window.window_size.as_())
                        } else {
                            0f32.as_()
                        };
                        weight = window * resampling_function(x_kernel_scaled);
                    } else {
                        let dx = dx.abs();
                        weight = resampling_function(dx * filter_scale);
                    }
                    weights_sum += weight;
                    *filter = weight;
                    local_filter_iteration += 1;
                }

                // EWA filters: blend each tap with its predecessor
                // (w[j] = α·w[j] + (1-α)·w[j-1]) and recompute the sum.
                let alpha: T = 0.7f32.as_();
                if resampling_filter.is_ewa && !local_filters.is_empty() {
                    weights_sum = local_filters[0];
                    for j in 1..local_filter_iteration {
                        let new_weight =
                            alpha * local_filters[j] + (1f32.as_() - alpha) * local_filters[j - 1];
                        local_filters[j] = new_weight;
                        weights_sum += new_weight;
                    }
                }

                let size = end - start;

                *bound = FilterBounds::new(start, size);

                // Normalize so the taps sum to 1 (skip degenerate all-zero rows).
                if weights_sum != 0f32.as_() {
                    let recpeq = 1f32.as_() / weights_sum;

                    for (dst, src) in weights
                        .iter_mut()
                        .skip(filter_position)
                        .take(size)
                        .zip(local_filters.iter().take(size))
                    {
                        *dst = *src * recpeq;
                    }
                }

                filter_position += kernel_size;
            }

            FilterWeights::<T>::new(
                weights,
                kernel_size,
                kernel_size,
                out_size,
                filter_radius.as_(),
                bounds,
            )
        } else {
            // Area path: two-tap linear weights {1 - dx, dx} per output pixel.
            let inv_scale: T = 1.as_() / scale;
            let kernel_size = 2;
            let filter_radius: T = 1.as_();
            let mut weights: Vec<T> = vec![T::default(); kernel_size * out_size];
            let mut local_filters = vec![T::default(); kernel_size];
            let mut filter_position = 0usize;

            for (i, bound) in bounds.iter_mut().enumerate() {
                let mut weights_sum: T = 0f32.as_();

                // First source tap and the fractional offset of the split.
                let sx: T = (i.as_() * scale).floor();
                let fx = (i as i64 + 1).as_() - (sx + 1.as_()) * inv_scale;
                let dx = if fx <= 0.as_() {
                    0.as_()
                } else {
                    fx - fx.floor()
                };
                let dx = dx.abs();
                let weight0 = 1.as_() - dx;
                let weight1: T = dx;
                local_filters[0] = weight0;
                local_filters[1] = weight1;

                let start: usize = sx.floor().max(0f32.as_()).as_();
                let end: usize = (sx + kernel_size.as_())
                    .ceil()
                    .min(start.as_() + kernel_size.as_())
                    .min(in_size.as_())
                    .as_();

                let size = end - start;

                // The second tap only contributes when it exists in-bounds.
                weights_sum += weight0;
                if size > 1 {
                    weights_sum += weight1;
                }
                *bound = FilterBounds::new(start, size);

                if weights_sum != 0f32.as_() {
                    let recpeq = 1f32.as_() / weights_sum;

                    for (dst, src) in weights
                        .iter_mut()
                        .skip(filter_position)
                        .take(size)
                        .zip(local_filters.iter().take(size))
                    {
                        *dst = *src * recpeq;
                    }
                } else {
                    // Degenerate bound: fall back to an identity tap.
                    weights[filter_position] = 1.as_();
                }

                filter_position += kernel_size;
            }

            FilterWeights::new(
                weights,
                kernel_size,
                kernel_size,
                out_size,
                filter_radius.as_(),
                bounds,
            )
        }
    }
}
671
672impl Scaler {
673 pub(crate) fn generic_resize<
674 'a,
675 T: Clone + Copy + Debug + Send + Sync + Default + 'static,
676 const N: usize,
677 >(
678 &self,
679 store: &ImageStore<'a, T, N>,
680 into: &mut ImageStoreMut<'a, T, N>,
681 ) -> Result<(), PicScaleError>
682 where
683 ImageStore<'a, T, N>: VerticalConvolutionPass<T, N> + HorizontalConvolutionPass<T, N>,
684 ImageStoreMut<'a, T, N>: CheckStoreDensity,
685 {
686 let new_size = into.get_size();
687 into.validate()?;
688 store.validate()?;
689 if store.width == 0 || store.height == 0 || new_size.width == 0 || new_size.height == 0 {
690 return Err(PicScaleError::ZeroImageDimensions);
691 }
692
693 if check_image_size_overflow(store.width, store.height, store.channels) {
694 return Err(PicScaleError::SourceImageIsTooLarge);
695 }
696
697 if check_image_size_overflow(new_size.width, new_size.height, store.channels) {
698 return Err(PicScaleError::DestinationImageIsTooLarge);
699 }
700
701 if into.should_have_bit_depth() && !(1..=16).contains(&into.bit_depth) {
702 return Err(PicScaleError::UnsupportedBitDepth(into.bit_depth));
703 }
704
705 if store.width == new_size.width && store.height == new_size.height {
706 store.copied_to_mut(into);
707 return Ok(());
708 }
709
710 let pool = self
711 .threading_policy
712 .get_pool(ImageSize::new(new_size.width, new_size.height));
713
714 if self.function == ResamplingFunction::Nearest {
715 resize_nearest::<T, N>(
716 store.buffer.as_ref(),
717 store.width,
718 store.height,
719 into.buffer.borrow_mut(),
720 new_size.width,
721 new_size.height,
722 &pool,
723 );
724 return Ok(());
725 }
726
727 let should_do_horizontal = store.width != new_size.width;
728 let should_do_vertical = store.height != new_size.height;
729 assert!(should_do_horizontal || should_do_vertical);
730
731 if should_do_vertical && should_do_horizontal {
732 let mut target_vertical = vec![T::default(); store.width * new_size.height * N];
733
734 let mut new_image_vertical = ImageStoreMut::<T, N>::from_slice(
735 &mut target_vertical,
736 store.width,
737 new_size.height,
738 )?;
739 new_image_vertical.bit_depth = into.bit_depth;
740 let vertical_filters = self.generate_weights(store.height, new_size.height);
741 let options = ConvolutionOptions::new(self.workload_strategy);
742 store.convolve_vertical(vertical_filters, &mut new_image_vertical, &pool, options);
743
744 let new_immutable_store = ImageStore::<T, N> {
745 buffer: std::borrow::Cow::Owned(target_vertical),
746 channels: N,
747 width: store.width,
748 height: new_size.height,
749 stride: store.width * N,
750 bit_depth: into.bit_depth,
751 };
752 let horizontal_filters = self.generate_weights(store.width, new_size.width);
753 let options = ConvolutionOptions::new(self.workload_strategy);
754 new_immutable_store.convolve_horizontal(horizontal_filters, into, &pool, options);
755 Ok(())
756 } else if should_do_vertical {
757 let vertical_filters = self.generate_weights(store.height, new_size.height);
758 let options = ConvolutionOptions::new(self.workload_strategy);
759 store.convolve_vertical(vertical_filters, into, &pool, options);
760 Ok(())
761 } else {
762 assert!(should_do_horizontal);
763 let horizontal_filters = self.generate_weights(store.width, new_size.width);
764 let options = ConvolutionOptions::new(self.workload_strategy);
765 store.convolve_horizontal(horizontal_filters, into, &pool, options);
766 Ok(())
767 }
768 }
769
770 fn forward_resize_with_alpha<
771 'a,
772 T: Clone + Copy + Debug + Send + Sync + Default + 'static,
773 const N: usize,
774 >(
775 &self,
776 store: &ImageStore<'a, T, N>,
777 into: &mut ImageStoreMut<'a, T, N>,
778 premultiply_alpha_requested: bool,
779 pool: &Option<ThreadPool>,
780 ) -> Result<(), PicScaleError>
781 where
782 ImageStore<'a, T, N>:
783 VerticalConvolutionPass<T, N> + HorizontalConvolutionPass<T, N> + AssociateAlpha<T, N>,
784 ImageStoreMut<'a, T, N>: CheckStoreDensity + UnassociateAlpha<T, N>,
785 {
786 let new_size = into.get_size();
787 let mut src_store: std::borrow::Cow<'_, ImageStore<'_, T, N>> =
788 std::borrow::Cow::Borrowed(store);
789
790 let mut has_alpha_premultiplied = true;
791
792 if premultiply_alpha_requested {
793 let is_alpha_premultiplication_reasonable =
794 src_store.is_alpha_premultiplication_needed();
795 if is_alpha_premultiplication_reasonable {
796 let mut target_premultiplied =
797 vec![T::default(); src_store.width * src_store.height * N];
798 let mut new_store = ImageStoreMut::<T, N>::from_slice(
799 &mut target_premultiplied,
800 src_store.width,
801 src_store.height,
802 )?;
803 new_store.bit_depth = into.bit_depth;
804 src_store.premultiply_alpha(&mut new_store, pool);
805 src_store = std::borrow::Cow::Owned(ImageStore::<T, N> {
806 buffer: std::borrow::Cow::Owned(target_premultiplied),
807 channels: N,
808 width: src_store.width,
809 height: src_store.height,
810 stride: src_store.width * N,
811 bit_depth: into.bit_depth,
812 });
813 has_alpha_premultiplied = true;
814 }
815 }
816
817 let mut target_vertical = vec![T::default(); src_store.width * new_size.height * N];
818
819 let mut new_image_vertical = ImageStoreMut::<T, N>::from_slice(
820 &mut target_vertical,
821 src_store.width,
822 new_size.height,
823 )?;
824 new_image_vertical.bit_depth = into.bit_depth;
825 let vertical_filters = self.generate_weights(src_store.height, new_size.height);
826 let options = ConvolutionOptions::new(self.workload_strategy);
827 src_store.convolve_vertical(vertical_filters, &mut new_image_vertical, pool, options);
828
829 let new_immutable_store = ImageStore::<T, N> {
830 buffer: std::borrow::Cow::Owned(target_vertical),
831 channels: N,
832 width: src_store.width,
833 height: new_size.height,
834 stride: src_store.width * N,
835 bit_depth: into.bit_depth,
836 };
837 let horizontal_filters = self.generate_weights(src_store.width, new_size.width);
838 let options = ConvolutionOptions::new(self.workload_strategy);
839 new_immutable_store.convolve_horizontal(horizontal_filters, into, pool, options);
840
841 if premultiply_alpha_requested && has_alpha_premultiplied {
842 into.unpremultiply_alpha(pool, self.workload_strategy);
843 }
844
845 Ok(())
846 }
847
848 fn forward_resize_vertical_with_alpha<
849 'a,
850 T: Clone + Copy + Debug + Send + Sync + Default + 'static,
851 const N: usize,
852 >(
853 &self,
854 store: &ImageStore<'a, T, N>,
855 into: &mut ImageStoreMut<'a, T, N>,
856 premultiply_alpha_requested: bool,
857 pool: &Option<ThreadPool>,
858 ) -> Result<(), PicScaleError>
859 where
860 ImageStore<'a, T, N>:
861 VerticalConvolutionPass<T, N> + HorizontalConvolutionPass<T, N> + AssociateAlpha<T, N>,
862 ImageStoreMut<'a, T, N>: CheckStoreDensity + UnassociateAlpha<T, N>,
863 {
864 let new_size = into.get_size();
865 let mut src_store = std::borrow::Cow::Borrowed(store);
866
867 let mut has_alpha_premultiplied = true;
868
869 if premultiply_alpha_requested {
870 let is_alpha_premultiplication_reasonable =
871 src_store.is_alpha_premultiplication_needed();
872 if is_alpha_premultiplication_reasonable {
873 let mut target_premultiplied =
874 vec![T::default(); src_store.width * src_store.height * N];
875 let mut new_store = ImageStoreMut::<T, N>::from_slice(
876 &mut target_premultiplied,
877 src_store.width,
878 src_store.height,
879 )?;
880 new_store.bit_depth = into.bit_depth;
881 src_store.premultiply_alpha(&mut new_store, pool);
882 src_store = std::borrow::Cow::Owned(ImageStore::<T, N> {
883 buffer: std::borrow::Cow::Owned(target_premultiplied),
884 channels: N,
885 width: src_store.width,
886 height: src_store.height,
887 stride: src_store.width * N,
888 bit_depth: into.bit_depth,
889 });
890 has_alpha_premultiplied = true;
891 }
892 }
893
894 let vertical_filters = self.generate_weights(src_store.height, new_size.height);
895 let options = ConvolutionOptions::new(self.workload_strategy);
896 src_store.convolve_vertical(vertical_filters, into, pool, options);
897
898 if premultiply_alpha_requested && has_alpha_premultiplied {
899 into.unpremultiply_alpha(pool, self.workload_strategy);
900 }
901
902 Ok(())
903 }
904
905 fn forward_resize_horizontal_with_alpha<
906 'a,
907 T: Clone + Copy + Debug + Send + Sync + Default + 'static,
908 const N: usize,
909 >(
910 &self,
911 store: &ImageStore<'a, T, N>,
912 into: &mut ImageStoreMut<'a, T, N>,
913 premultiply_alpha_requested: bool,
914 pool: &Option<ThreadPool>,
915 ) -> Result<(), PicScaleError>
916 where
917 ImageStore<'a, T, N>:
918 VerticalConvolutionPass<T, N> + HorizontalConvolutionPass<T, N> + AssociateAlpha<T, N>,
919 ImageStoreMut<'a, T, N>: CheckStoreDensity + UnassociateAlpha<T, N>,
920 {
921 let new_size = into.get_size();
922 let mut src_store = std::borrow::Cow::Borrowed(store);
923
924 let mut has_alpha_premultiplied = true;
925
926 if premultiply_alpha_requested {
927 let is_alpha_premultiplication_reasonable =
928 src_store.is_alpha_premultiplication_needed();
929 if is_alpha_premultiplication_reasonable {
930 let mut target_premultiplied =
931 vec![T::default(); src_store.width * src_store.height * N];
932 let mut new_store = ImageStoreMut::<T, N>::from_slice(
933 &mut target_premultiplied,
934 src_store.width,
935 src_store.height,
936 )?;
937 new_store.bit_depth = into.bit_depth;
938 src_store.premultiply_alpha(&mut new_store, pool);
939 src_store = std::borrow::Cow::Owned(ImageStore::<T, N> {
940 buffer: std::borrow::Cow::Owned(target_premultiplied),
941 channels: N,
942 width: src_store.width,
943 height: src_store.height,
944 stride: src_store.width * N,
945 bit_depth: into.bit_depth,
946 });
947 has_alpha_premultiplied = true;
948 }
949 }
950
951 let horizontal_filters = self.generate_weights(src_store.width, new_size.width);
952 let options = ConvolutionOptions::new(self.workload_strategy);
953 src_store.convolve_horizontal(horizontal_filters, into, pool, options);
954
955 if premultiply_alpha_requested && has_alpha_premultiplied {
956 into.unpremultiply_alpha(pool, self.workload_strategy);
957 }
958
959 Ok(())
960 }
961
962 pub(crate) fn generic_resize_with_alpha<
963 'a,
964 T: Clone + Copy + Debug + Send + Sync + Default + 'static,
965 const N: usize,
966 >(
967 &self,
968 store: &ImageStore<'a, T, N>,
969 into: &mut ImageStoreMut<'a, T, N>,
970 premultiply_alpha_requested: bool,
971 ) -> Result<(), PicScaleError>
972 where
973 ImageStore<'a, T, N>:
974 VerticalConvolutionPass<T, N> + HorizontalConvolutionPass<T, N> + AssociateAlpha<T, N>,
975 ImageStoreMut<'a, T, N>: CheckStoreDensity + UnassociateAlpha<T, N>,
976 {
977 let new_size = into.get_size();
978 into.validate()?;
979 store.validate()?;
980 if store.width == 0 || store.height == 0 || new_size.width == 0 || new_size.height == 0 {
981 return Err(PicScaleError::ZeroImageDimensions);
982 }
983
984 if check_image_size_overflow(store.width, store.height, store.channels) {
985 return Err(PicScaleError::SourceImageIsTooLarge);
986 }
987
988 if check_image_size_overflow(new_size.width, new_size.height, store.channels) {
989 return Err(PicScaleError::DestinationImageIsTooLarge);
990 }
991
992 if into.should_have_bit_depth() && !(1..=16).contains(&into.bit_depth) {
993 return Err(PicScaleError::UnsupportedBitDepth(into.bit_depth));
994 }
995
996 if store.width == new_size.width && store.height == new_size.height {
997 store.copied_to_mut(into);
998 return Ok(());
999 }
1000
1001 let pool = self
1002 .threading_policy
1003 .get_pool(ImageSize::new(new_size.width, new_size.height));
1004
1005 if self.function == ResamplingFunction::Nearest {
1006 resize_nearest::<T, N>(
1007 store.buffer.as_ref(),
1008 store.width,
1009 store.height,
1010 into.buffer.borrow_mut(),
1011 new_size.width,
1012 new_size.height,
1013 &pool,
1014 );
1015 return Ok(());
1016 }
1017
1018 let should_do_horizontal = store.width != new_size.width;
1019 let should_do_vertical = store.height != new_size.height;
1020 assert!(should_do_horizontal || should_do_vertical);
1021
1022 if should_do_vertical && should_do_horizontal {
1023 self.forward_resize_with_alpha(store, into, premultiply_alpha_requested, &pool)
1024 } else if should_do_vertical {
1025 self.forward_resize_vertical_with_alpha(store, into, premultiply_alpha_requested, &pool)
1026 } else {
1027 assert!(should_do_horizontal);
1028 self.forward_resize_horizontal_with_alpha(
1029 store,
1030 into,
1031 premultiply_alpha_requested,
1032 &pool,
1033 )
1034 }
1035 }
1036}
1037
// All 8-bit entry points delegate to the shared generic drivers; the
// alpha-aware ones route through the premultiplication-capable path.
impl Scaling for Scaler {
    fn set_threading_policy(&mut self, threading_policy: ThreadingPolicy) {
        self.threading_policy = threading_policy;
    }

    fn resize_plane<'a>(
        &'a self,
        store: &ImageStore<'a, u8, 1>,
        into: &mut ImageStoreMut<'a, u8, 1>,
    ) -> Result<(), PicScaleError> {
        self.generic_resize(store, into)
    }

    fn resize_cbcr8<'a>(
        &'a self,
        store: &ImageStore<'a, u8, 2>,
        into: &mut ImageStoreMut<'a, u8, 2>,
    ) -> Result<(), PicScaleError> {
        self.generic_resize(store, into)
    }

    fn resize_gray_alpha<'a>(
        &'a self,
        store: &ImageStore<'a, u8, 2>,
        into: &mut ImageStoreMut<'a, u8, 2>,
        premultiply_alpha: bool,
    ) -> Result<(), PicScaleError> {
        self.generic_resize_with_alpha(store, into, premultiply_alpha)
    }

    fn resize_rgb<'a>(
        &'a self,
        store: &ImageStore<'a, u8, 3>,
        into: &mut ImageStoreMut<'a, u8, 3>,
    ) -> Result<(), PicScaleError> {
        self.generic_resize(store, into)
    }

    fn resize_rgba<'a>(
        &'a self,
        store: &ImageStore<'a, u8, 4>,
        into: &mut ImageStoreMut<'a, u8, 4>,
        premultiply_alpha: bool,
    ) -> Result<(), PicScaleError> {
        self.generic_resize_with_alpha(store, into, premultiply_alpha)
    }
}
1085
// `f32` entry points: same delegation pattern as the 8-bit impl.
impl ScalingF32 for Scaler {
    fn resize_plane_f32<'a>(
        &'a self,
        store: &ImageStore<'a, f32, 1>,
        into: &mut ImageStoreMut<'a, f32, 1>,
    ) -> Result<(), PicScaleError> {
        self.generic_resize(store, into)
    }

    fn resize_cbcr_f32<'a>(
        &'a self,
        store: &ImageStore<'a, f32, 2>,
        into: &mut ImageStoreMut<'a, f32, 2>,
    ) -> Result<(), PicScaleError> {
        self.generic_resize(store, into)
    }

    fn resize_gray_alpha_f32<'a>(
        &'a self,
        store: &ImageStore<'a, f32, 2>,
        into: &mut ImageStoreMut<'a, f32, 2>,
        premultiply_alpha: bool,
    ) -> Result<(), PicScaleError> {
        self.generic_resize_with_alpha(store, into, premultiply_alpha)
    }

    fn resize_rgb_f32<'a>(
        &'a self,
        store: &ImageStore<'a, f32, 3>,
        into: &mut ImageStoreMut<'a, f32, 3>,
    ) -> Result<(), PicScaleError> {
        self.generic_resize(store, into)
    }

    fn resize_rgba_f32<'a>(
        &'a self,
        store: &ImageStore<'a, f32, 4>,
        into: &mut ImageStoreMut<'a, f32, 4>,
        premultiply_alpha: bool,
    ) -> Result<(), PicScaleError> {
        self.generic_resize_with_alpha(store, into, premultiply_alpha)
    }
}
1129
// `u16` entry points: same delegation pattern as the 8-bit impl.
impl ScalingU16 for Scaler {
    fn resize_rgb_u16<'a>(
        &'a self,
        store: &ImageStore<'a, u16, 3>,
        into: &mut ImageStoreMut<'a, u16, 3>,
    ) -> Result<(), PicScaleError> {
        self.generic_resize(store, into)
    }

    fn resize_cbcr_u16<'a>(
        &'a self,
        store: &ImageStore<'a, u16, 2>,
        into: &mut ImageStoreMut<'a, u16, 2>,
    ) -> Result<(), PicScaleError> {
        self.generic_resize(store, into)
    }

    fn resize_gray_alpha16<'a>(
        &'a self,
        store: &ImageStore<'a, u16, 2>,
        into: &mut ImageStoreMut<'a, u16, 2>,
        premultiply_alpha: bool,
    ) -> Result<(), PicScaleError> {
        self.generic_resize_with_alpha(store, into, premultiply_alpha)
    }

    fn resize_rgba_u16<'a>(
        &'a self,
        store: &ImageStore<'a, u16, 4>,
        into: &mut ImageStoreMut<'a, u16, 4>,
        premultiply_alpha: bool,
    ) -> Result<(), PicScaleError> {
        self.generic_resize_with_alpha(store, into, premultiply_alpha)
    }

    fn resize_plane_u16<'a>(
        &'a self,
        store: &ImageStore<'a, u16, 1>,
        into: &mut ImageStoreMut<'a, u16, 1>,
    ) -> Result<(), PicScaleError> {
        self.generic_resize(store, into)
    }
}
1242
1243impl Scaler {
1244 pub fn resize_ar30(
1254 &self,
1255 src_image: &ImageStore<u8, 4>,
1256 dst_image: &mut ImageStoreMut<u8, 4>,
1257 order: Ar30ByteOrder,
1258 ) -> Result<(), PicScaleError> {
1259 src_image.validate()?;
1260 dst_image.validate()?;
1261 let dst_size = dst_image.get_size();
1262 let dst_stride = dst_image.stride();
1263 match order {
1264 Ar30ByteOrder::Host => {
1265 resize_ar30_impl::<{ Rgb30::Ar30 as usize }, { Ar30ByteOrder::Host as usize }>(
1266 src_image.as_bytes(),
1267 src_image.stride,
1268 src_image.get_size(),
1269 dst_image.buffer.borrow_mut(),
1270 dst_stride,
1271 dst_size,
1272 self,
1273 )
1274 }
1275 Ar30ByteOrder::Network => {
1276 resize_ar30_impl::<{ Rgb30::Ar30 as usize }, { Ar30ByteOrder::Network as usize }>(
1277 src_image.as_bytes(),
1278 src_image.stride,
1279 src_image.get_size(),
1280 dst_image.buffer.borrow_mut(),
1281 dst_stride,
1282 dst_size,
1283 self,
1284 )
1285 }
1286 }
1287 }
1288
1289 pub fn resize_ra30(
1298 &self,
1299 src_image: &ImageStore<u8, 4>,
1300 dst_image: &mut ImageStoreMut<u8, 4>,
1301 order: Ar30ByteOrder,
1302 ) -> Result<(), PicScaleError> {
1303 src_image.validate()?;
1304 dst_image.validate()?;
1305 let dst_size = dst_image.get_size();
1306 let dst_stride = dst_image.stride();
1307 match order {
1308 Ar30ByteOrder::Host => {
1309 resize_ar30_impl::<{ Rgb30::Ra30 as usize }, { Ar30ByteOrder::Host as usize }>(
1310 src_image.as_bytes(),
1311 src_image.stride,
1312 src_image.get_size(),
1313 dst_image.buffer.borrow_mut(),
1314 dst_stride,
1315 dst_size,
1316 self,
1317 )
1318 }
1319 Ar30ByteOrder::Network => {
1320 resize_ar30_impl::<{ Rgb30::Ra30 as usize }, { Ar30ByteOrder::Network as usize }>(
1321 src_image.as_bytes(),
1322 src_image.stride,
1323 src_image.get_size(),
1324 dst_image.buffer.borrow_mut(),
1325 dst_stride,
1326 dst_size,
1327 self,
1328 )
1329 }
1330 }
1331 }
1332}
1333
/// Per-call options for the [`ImageStoreScaling`] convenience API.
#[derive(Debug, Clone, Copy, Ord, PartialOrd, Eq, PartialEq, Default)]
pub struct ScalingOptions {
    /// Resampling kernel to use.
    pub resampling_function: ResamplingFunction,
    /// Whether to associate alpha before scaling (alpha-carrying stores only).
    pub premultiply_alpha: bool,
    /// Threading policy for this call.
    pub threading_policy: ThreadingPolicy,
}
1341
/// Convenience trait implemented on concrete image stores: scales `self`
/// into `store` using a [`Scaler`] configured from `options`.
pub trait ImageStoreScaling<'b, T, const N: usize>
where
    T: Clone + Copy + Debug,
{
    /// Resizes this store into `store` according to `options`.
    fn scale(
        &self,
        store: &mut ImageStoreMut<'b, T, N>,
        options: ScalingOptions,
    ) -> Result<(), PicScaleError>;
}
1353
/// Implements [`ImageStoreScaling`] for an alpha-carrying store type by
/// delegating to `Scaler::generic_resize_with_alpha`.
macro_rules! def_image_scaling_alpha {
    ($clazz: ident, $fx_type: ident, $cn: expr) => {
        impl<'b> ImageStoreScaling<'b, $fx_type, $cn> for $clazz<'b> {
            fn scale(
                &self,
                store: &mut ImageStoreMut<'b, $fx_type, $cn>,
                options: ScalingOptions,
            ) -> Result<(), PicScaleError> {
                // A fresh scaler is configured per call from `options`.
                let mut scaler = Scaler::new(options.resampling_function);
                scaler.set_threading_policy(options.threading_policy);
                scaler.generic_resize_with_alpha(self, store, options.premultiply_alpha)
            }
        }
    };
}
1369
/// Implements [`ImageStoreScaling`] for a store type without alpha handling
/// by delegating to `Scaler::generic_resize`.
macro_rules! def_image_scaling {
    ($clazz: ident, $fx_type: ident, $cn: expr) => {
        impl<'b> ImageStoreScaling<'b, $fx_type, $cn> for $clazz<'b> {
            fn scale(
                &self,
                store: &mut ImageStoreMut<'b, $fx_type, $cn>,
                options: ScalingOptions,
            ) -> Result<(), PicScaleError> {
                // A fresh scaler is configured per call from `options`.
                let mut scaler = Scaler::new(options.resampling_function);
                scaler.set_threading_policy(options.threading_policy);
                scaler.generic_resize(self, store)
            }
        }
    };
}
1385
// Wire up `ImageStoreScaling` for every concrete store type; the 4-channel
// (RGBA) stores go through the alpha-aware path.
def_image_scaling_alpha!(Rgba8ImageStore, u8, 4);
def_image_scaling!(Rgb8ImageStore, u8, 3);
def_image_scaling!(CbCr8ImageStore, u8, 2);
def_image_scaling!(Planar8ImageStore, u8, 1);
def_image_scaling!(Planar16ImageStore, u16, 1);
def_image_scaling!(CbCr16ImageStore, u16, 2);
def_image_scaling!(Rgb16ImageStore, u16, 3);
def_image_scaling_alpha!(Rgba16ImageStore, u16, 4);
def_image_scaling!(PlanarF32ImageStore, f32, 1);
def_image_scaling!(CbCrF32ImageStore, f32, 2);
def_image_scaling!(RgbF32ImageStore, f32, 3);
def_image_scaling_alpha!(RgbaF32ImageStore, f32, 4);
1398
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn check_rgba8_resizing_vertical() {
        // A single non-zero channel on the middle row, last column, must
        // remain visible after the image height is halved.
        const CN: usize = 4;
        let width = 255;
        let height = 512;
        let mut pixels = vec![0u8; height * width * CN];
        pixels[width * CN * height.div_ceil(2) + (width - 1) * CN] = 174;
        let mut scaler = Scaler::new(ResamplingFunction::Bilinear);
        scaler.set_threading_policy(ThreadingPolicy::Single);
        let source = ImageStore::from_slice(&pixels, width, height).unwrap();
        let mut destination = ImageStoreMut::alloc(width, height / 2);
        scaler
            .resize_rgba(&source, &mut destination, false)
            .unwrap();
        let output = destination.buffer.borrow();

        // Probe the corresponding position in the halved image.
        let probed = output[width * CN * (height / 2).div_ceil(2) + (width - 1) * CN];
        assert_ne!(probed, 0);
    }

    #[test]
    fn check_rgba8_resizing_both() {
        // The very first sample survives when both dimensions are halved.
        const CN: usize = 4;
        let width = 255;
        let height = 512;
        let mut pixels = vec![0u8; height * width * CN];
        pixels[0] = 174;
        let mut scaler = Scaler::new(ResamplingFunction::Bilinear);
        scaler.set_threading_policy(ThreadingPolicy::Single);
        let source = ImageStore::from_slice(&pixels, width, height).unwrap();
        let mut destination = ImageStoreMut::alloc(width / 2, height / 2);
        scaler
            .resize_rgba(&source, &mut destination, false)
            .unwrap();
        let output = destination.buffer.borrow();

        assert_ne!(output[0], 0);
    }

    #[test]
    fn check_rgba8_resizing_alpha() {
        // With premultiplication requested, a colour sample whose own alpha
        // is zero must be wiped out in the result.
        const CN: usize = 4;
        let width = 255;
        let height = 512;
        let mut pixels = vec![0u8; height * width * CN];
        pixels[0] = 174; // red channel of pixel 0; its alpha stays 0
        pixels[7] = 1; // alpha channel of pixel 1
        let mut scaler = Scaler::new(ResamplingFunction::Bilinear);
        scaler.set_threading_policy(ThreadingPolicy::Single);
        let source = ImageStore::from_slice(&pixels, width, height).unwrap();
        let mut destination = ImageStoreMut::alloc(width / 2, height / 2);
        scaler
            .resize_rgba(&source, &mut destination, true)
            .unwrap();
        let output = destination.buffer.borrow();

        assert_eq!(output[0], 0);
    }

    #[test]
    fn check_rgb8_resizing_vertical() {
        // Same middle-row probe as the RGBA test, for 3-channel input.
        const CN: usize = 3;
        let width = 255;
        let height = 512;
        let mut pixels = vec![0u8; height * width * CN];
        pixels[width * CN * height.div_ceil(2) + (width - 1) * CN] = 174;
        let mut scaler = Scaler::new(ResamplingFunction::Bilinear);
        scaler.set_threading_policy(ThreadingPolicy::Single);
        let source = ImageStore::from_slice(&pixels, width, height).unwrap();
        let mut destination = ImageStoreMut::alloc(width, height / 2);
        scaler.resize_rgb(&source, &mut destination).unwrap();
        let output = destination.buffer.borrow();

        let probed = output[width * CN * (height / 2).div_ceil(2) + (width - 1) * CN];
        assert_ne!(probed, 0);
    }

    #[test]
    fn check_rgba10_resizing_vertical() {
        // 10-bit tagged RGBA content, vertical halving with Lanczos3.
        const CN: usize = 4;
        let width = 8;
        let height = 8;
        let mut pixels = vec![0u16; height * width * CN];
        pixels[width * CN * height.div_ceil(2) + (width - 1) * CN] = 174;
        let mut scaler = Scaler::new(ResamplingFunction::Lanczos3);
        scaler.set_threading_policy(ThreadingPolicy::Single);
        let mut source = ImageStore::from_slice(&pixels, width, height).unwrap();
        source.bit_depth = 10;
        let mut destination = ImageStoreMut::alloc_with_depth(width, height / 2, 10);
        scaler
            .resize_rgba_u16(&source, &mut destination, false)
            .unwrap();
        let output = destination.buffer.borrow();

        let probed = output[width * CN * (height / 2).div_ceil(2) + (width - 1) * CN];
        assert_ne!(probed, 0);
    }

    #[test]
    fn check_rgb10_resizing_vertical() {
        // 10-bit tagged RGB content: the first sample must not vanish.
        const CN: usize = 3;
        let width = 8;
        let height = 4;
        let mut pixels = vec![0u16; height * width * CN];
        pixels[0] = 174;
        let mut scaler = Scaler::new(ResamplingFunction::Lanczos3);
        scaler.set_threading_policy(ThreadingPolicy::Single);
        let mut source = ImageStore::from_slice(&pixels, width, height).unwrap();
        source.bit_depth = 10;
        let mut destination = ImageStoreMut::alloc_with_depth(width, height / 2, 10);
        scaler.resize_rgb_u16(&source, &mut destination).unwrap();
        let output = destination.buffer.borrow();

        assert_ne!(output[0], 0);
    }

    #[test]
    fn check_rgb16_resizing_vertical() {
        // 16-bit target depth from a 10-bit tagged source: a uniform
        // background with a slightly brighter first sample.
        const CN: usize = 3;
        let width = 8;
        let height = 8;
        let mut pixels = vec![164u16; height * width * CN];
        pixels[0] = 174;
        let mut scaler = Scaler::new(ResamplingFunction::Lanczos3);
        scaler.set_threading_policy(ThreadingPolicy::Single);
        let mut source = ImageStore::from_slice(&pixels, width, height).unwrap();
        source.bit_depth = 10;
        let mut destination = ImageStoreMut::alloc_with_depth(width, height / 2, 16);
        scaler.resize_rgb_u16(&source, &mut destination).unwrap();
        let output = destination.buffer.borrow();

        assert_ne!(output[0], 0);
    }

    #[test]
    fn check_rgba16_resizing_vertical() {
        // 16-bit target depth, RGBA, vertical halving with Lanczos3.
        const CN: usize = 4;
        let width = 8;
        let height = 8;
        let mut pixels = vec![0u16; height * width * CN];
        pixels[width * CN * height.div_ceil(2) + (width - 1) * CN] = 174;
        let mut scaler = Scaler::new(ResamplingFunction::Lanczos3);
        scaler.set_threading_policy(ThreadingPolicy::Single);
        let mut source = ImageStore::from_slice(&pixels, width, height).unwrap();
        source.bit_depth = 10;
        let mut destination = ImageStoreMut::alloc_with_depth(width, height / 2, 16);
        scaler
            .resize_rgba_u16(&source, &mut destination, false)
            .unwrap();
        let output = destination.buffer.borrow();

        let probed = output[width * CN * (height / 2).div_ceil(2) + (width - 1) * CN];
        assert_ne!(probed, 0);
    }

    #[test]
    fn check_rgba8_nearest_vertical() {
        // Nearest-neighbour must carry the marked value over unchanged,
        // so here we can assert on the exact value, not merely non-zero.
        const CN: usize = 4;
        let width = 255;
        let height = 512;
        let mut pixels = vec![0u8; height * width * CN];
        pixels[width * CN * height.div_ceil(2) + (width - 1) * CN] = 174;
        let mut scaler = Scaler::new(ResamplingFunction::Nearest);
        scaler.set_threading_policy(ThreadingPolicy::Single);
        let source = ImageStore::from_slice(&pixels, width, height).unwrap();
        let mut destination = ImageStoreMut::alloc(width, height / 2);
        scaler
            .resize_rgba(&source, &mut destination, false)
            .unwrap();
        let output = destination.buffer.borrow();

        let probed = output[width * CN * (height / 2).div_ceil(2) + (width - 1) * CN];
        assert_eq!(probed, 174);
    }
}