1#![forbid(unsafe_code)]
30use crate::ar30::{Ar30ByteOrder, Rgb30};
31use crate::convolution::{ConvolutionOptions, HorizontalConvolutionPass, VerticalConvolutionPass};
32use crate::filter_weights::{FilterBounds, FilterWeights};
33use crate::image_size::ImageSize;
34use crate::image_store::{
35 AssociateAlpha, CheckStoreDensity, ImageStore, ImageStoreMut, UnassociateAlpha,
36};
37use crate::math::gaussian::Exponential;
38use crate::math::sinc::Trigonometry;
39use crate::nearest_sampler::resize_nearest;
40use crate::pic_scale_error::PicScaleError;
41use crate::resize_ar30::resize_ar30_impl;
42use crate::support::check_image_size_overflow;
43use crate::threading_policy::ThreadingPolicy;
44use crate::{
45 CbCr8ImageStore, CbCr16ImageStore, CbCrF32ImageStore, ConstPI, ConstSqrt2, Jinc,
46 Planar8ImageStore, Planar16ImageStore, PlanarF32ImageStore, ResamplingFunction, Rgb8ImageStore,
47 Rgb16ImageStore, RgbF32ImageStore, Rgba8ImageStore, Rgba16ImageStore, RgbaF32ImageStore,
48};
49use num_traits::{AsPrimitive, Float, Signed};
50use std::fmt::Debug;
51use std::ops::{AddAssign, MulAssign, Neg};
52
/// Image rescaling entry point.
///
/// Bundles the resampling function, the threading policy and the workload
/// strategy that every `resize_*` method uses.
#[derive(Debug, Copy, Clone)]
pub struct Scaler {
    /// Resampling kernel used when generating filter weights.
    pub(crate) function: ResamplingFunction,
    /// Policy used to acquire a thread pool for the convolution passes.
    pub(crate) threading_policy: ThreadingPolicy,
    /// Speed-vs-quality hint forwarded to the convolution passes.
    pub workload_strategy: WorkloadStrategy,
}
60
/// Resizing operations for 8-bit (`u8`) images.
pub trait Scaling {
    /// Sets the threading policy used by subsequent resize calls.
    fn set_threading_policy(&mut self, threading_policy: ThreadingPolicy);

    /// Resizes a planar (single channel) 8-bit image.
    fn resize_plane<'a>(
        &'a self,
        store: &ImageStore<'a, u8, 1>,
        into: &mut ImageStoreMut<'a, u8, 1>,
    ) -> Result<(), PicScaleError>;

    /// Resizes a two-channel (CbCr) 8-bit image.
    fn resize_cbcr8<'a>(
        &'a self,
        store: &ImageStore<'a, u8, 2>,
        into: &mut ImageStoreMut<'a, u8, 2>,
    ) -> Result<(), PicScaleError>;

    /// Resizes a gray+alpha 8-bit image.
    ///
    /// When `premultiply_alpha` is true, alpha pre-multiplication is applied
    /// before scaling (and undone afterwards) if the implementation deems it
    /// necessary.
    fn resize_gray_alpha<'a>(
        &'a self,
        store: &ImageStore<'a, u8, 2>,
        into: &mut ImageStoreMut<'a, u8, 2>,
        premultiply_alpha: bool,
    ) -> Result<(), PicScaleError>;

    /// Resizes an RGB 8-bit image.
    fn resize_rgb<'a>(
        &'a self,
        store: &ImageStore<'a, u8, 3>,
        into: &mut ImageStoreMut<'a, u8, 3>,
    ) -> Result<(), PicScaleError>;

    /// Resizes an RGBA 8-bit image, optionally with alpha pre-multiplication.
    fn resize_rgba<'a>(
        &'a self,
        store: &ImageStore<'a, u8, 4>,
        into: &mut ImageStoreMut<'a, u8, 4>,
        premultiply_alpha: bool,
    ) -> Result<(), PicScaleError>;
}
176
/// Resizing operations for `f32` images.
pub trait ScalingF32 {
    /// Resizes a planar (single channel) `f32` image.
    fn resize_plane_f32<'a>(
        &'a self,
        store: &ImageStore<'a, f32, 1>,
        into: &mut ImageStoreMut<'a, f32, 1>,
    ) -> Result<(), PicScaleError>;

    /// Resizes a two-channel (CbCr) `f32` image.
    fn resize_cbcr_f32<'a>(
        &'a self,
        store: &ImageStore<'a, f32, 2>,
        into: &mut ImageStoreMut<'a, f32, 2>,
    ) -> Result<(), PicScaleError>;

    /// Resizes a gray+alpha `f32` image, optionally with alpha pre-multiplication.
    fn resize_gray_alpha_f32<'a>(
        &'a self,
        store: &ImageStore<'a, f32, 2>,
        into: &mut ImageStoreMut<'a, f32, 2>,
        premultiply_alpha: bool,
    ) -> Result<(), PicScaleError>;

    /// Resizes an RGB `f32` image.
    fn resize_rgb_f32<'a>(
        &'a self,
        store: &ImageStore<'a, f32, 3>,
        into: &mut ImageStoreMut<'a, f32, 3>,
    ) -> Result<(), PicScaleError>;

    /// Resizes an RGBA `f32` image, optionally with alpha pre-multiplication.
    fn resize_rgba_f32<'a>(
        &'a self,
        store: &ImageStore<'a, f32, 4>,
        into: &mut ImageStoreMut<'a, f32, 4>,
        premultiply_alpha: bool,
    ) -> Result<(), PicScaleError>;
}
280
/// Speed-vs-quality hint for the convolution passes.
#[derive(Debug, Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Default)]
pub enum WorkloadStrategy {
    /// Favors output quality over throughput.
    PreferQuality,
    /// Favors throughput over quality (the default).
    #[default]
    PreferSpeed,
}
290
/// Resizing operations for `u16` images (bit depths 1..=16).
pub trait ScalingU16 {
    /// Resizes a planar (single channel) `u16` image.
    fn resize_plane_u16<'a>(
        &'a self,
        store: &ImageStore<'a, u16, 1>,
        into: &mut ImageStoreMut<'a, u16, 1>,
    ) -> Result<(), PicScaleError>;

    /// Resizes a two-channel (CbCr) `u16` image.
    fn resize_cbcr_u16<'a>(
        &'a self,
        store: &ImageStore<'a, u16, 2>,
        into: &mut ImageStoreMut<'a, u16, 2>,
    ) -> Result<(), PicScaleError>;

    /// Resizes a gray+alpha `u16` image, optionally with alpha pre-multiplication.
    fn resize_gray_alpha16<'a>(
        &'a self,
        store: &ImageStore<'a, u16, 2>,
        into: &mut ImageStoreMut<'a, u16, 2>,
        premultiply_alpha: bool,
    ) -> Result<(), PicScaleError>;

    /// Resizes an RGB `u16` image.
    fn resize_rgb_u16<'a>(
        &'a self,
        store: &ImageStore<'a, u16, 3>,
        into: &mut ImageStoreMut<'a, u16, 3>,
    ) -> Result<(), PicScaleError>;

    /// Resizes an RGBA `u16` image, optionally with alpha pre-multiplication.
    fn resize_rgba_u16<'a>(
        &'a self,
        store: &ImageStore<'a, u16, 4>,
        into: &mut ImageStoreMut<'a, u16, 4>,
        premultiply_alpha: bool,
    ) -> Result<(), PicScaleError>;
}
439
440impl Scaler {
441 pub fn new(filter: ResamplingFunction) -> Self {
446 Scaler {
447 function: filter,
448 threading_policy: ThreadingPolicy::Single,
449 workload_strategy: WorkloadStrategy::default(),
450 }
451 }
452
453 pub fn set_workload_strategy(&mut self, workload_strategy: WorkloadStrategy) {
457 self.workload_strategy = workload_strategy;
458 }
459
    /// Builds the 1-D filter weights and per-output-pixel source bounds for
    /// resampling `in_size` samples down/up to `out_size` samples.
    ///
    /// Generic over the weight precision `T` (a float-like type; all literals
    /// are converted through `AsPrimitive`). Two paths exist:
    /// - the general windowed-kernel path, sized from the filter's minimum
    ///   kernel size and scaled for downscaling;
    /// - a fixed two-tap path taken when the filter is flagged `is_area` and
    ///   `scale < 1` (output larger than input).
    pub(crate) fn generate_weights<T>(&self, in_size: usize, out_size: usize) -> FilterWeights<T>
    where
        T: Copy
            + Neg
            + Signed
            + Float
            + 'static
            + ConstPI
            + MulAssign<T>
            + AddAssign<T>
            + AsPrimitive<f64>
            + AsPrimitive<usize>
            + Jinc<T>
            + ConstSqrt2
            + Default
            + AsPrimitive<i32>
            + Trigonometry
            + Exponential,
        f32: AsPrimitive<T>,
        f64: AsPrimitive<T>,
        i64: AsPrimitive<T>,
        i32: AsPrimitive<T>,
        usize: AsPrimitive<T>,
    {
        let resampling_filter = self.function.get_resampling_filter();
        // scale > 1 means downscaling (more input samples per output sample).
        let scale = in_size.as_() / out_size.as_();
        let is_resizable_kernel = resampling_filter.is_resizable_kernel;
        // Resizable kernels widen with the downscale factor; fixed kernels
        // always use a unit support.
        let filter_scale_cutoff = match is_resizable_kernel {
            true => scale.max(1f32.as_()),
            false => 1f32.as_(),
        };
        let filter_base_size = resampling_filter.min_kernel_size;
        let resampling_function = resampling_filter.kernel;

        let is_area = resampling_filter.is_area && scale < 1.as_();

        let mut bounds: Vec<FilterBounds> = vec![FilterBounds::new(0, 0); out_size];

        if !is_area {
            // General path: evaluate the (optionally windowed) kernel over a
            // support of `kernel_size` taps around each output pixel's center.
            let window_func = resampling_filter.window;
            let base_size: usize = (filter_base_size.as_() * filter_scale_cutoff).round().as_();
            let kernel_size = base_size;
            let filter_radius = base_size.as_() / 2.as_();
            let filter_scale = 1f32.as_() / filter_scale_cutoff;
            // Flat weights buffer: `kernel_size` entries per output pixel.
            let mut weights: Vec<T> = vec![T::default(); kernel_size * out_size];
            let mut local_filters = vec![T::default(); kernel_size];
            let mut filter_position = 0usize;
            // Reciprocal of the window's blur parameter (0 disables blurring).
            let blur_scale = match window_func {
                None => 1f32.as_(),
                Some(window) => {
                    if window.blur.as_() > 0f32.as_() {
                        1f32.as_() / window.blur.as_()
                    } else {
                        0f32.as_()
                    }
                }
            };
            for (i, bound) in bounds.iter_mut().enumerate() {
                // Sample-center convention: output pixel i maps to source
                // coordinate (i + 0.5) * scale, clamped to the input extent.
                let center_x = ((i.as_() + 0.5.as_()) * scale).min(in_size.as_());
                let mut weights_sum: T = 0f32.as_();
                let mut local_filter_iteration = 0usize;

                // Clamp the tap range to [0, in_size) and at most kernel_size.
                let start: usize = (center_x - filter_radius).floor().max(0f32.as_()).as_();
                let end: usize = (center_x + filter_radius)
                    .ceil()
                    .min(start.as_() + kernel_size.as_())
                    .min(in_size.as_())
                    .as_();

                let center = center_x - 0.5.as_();

                for (k, filter) in (start..end).zip(local_filters.iter_mut()) {
                    let dx = k.as_() - center;
                    let weight;
                    if let Some(resampling_window) = window_func {
                        // Apply blur scaling and taper before evaluating the
                        // window and the kernel.
                        let mut x = dx.abs();
                        x = if resampling_window.blur.as_() > 0f32.as_() {
                            x * blur_scale
                        } else {
                            x
                        };
                        x = if x <= resampling_window.taper.as_() {
                            0f32.as_()
                        } else {
                            (x - resampling_window.taper.as_())
                                / (1f32.as_() - resampling_window.taper.as_())
                        };
                        let window_producer = resampling_window.window;
                        let x_kernel_scaled = x * filter_scale;
                        let window = if x < resampling_window.window_size.as_() {
                            window_producer(x_kernel_scaled * resampling_window.window_size.as_())
                        } else {
                            0f32.as_()
                        };
                        weight = window * resampling_function(x_kernel_scaled);
                    } else {
                        let dx = dx.abs();
                        weight = resampling_function(dx * filter_scale);
                    }
                    weights_sum += weight;
                    *filter = weight;
                    local_filter_iteration += 1;
                }

                // EWA-flagged filters get a smoothing pass that blends each
                // tap with its predecessor (factor 0.7) and recomputes the sum.
                let alpha: T = 0.7f32.as_();
                if resampling_filter.is_ewa && !local_filters.is_empty() {
                    weights_sum = local_filters[0];
                    for j in 1..local_filter_iteration {
                        let new_weight =
                            alpha * local_filters[j] + (1f32.as_() - alpha) * local_filters[j - 1];
                        local_filters[j] = new_weight;
                        weights_sum += new_weight;
                    }
                }

                let size = end - start;

                *bound = FilterBounds::new(start, size);

                // Normalize so the taps sum to 1 (skipped for an all-zero row).
                if weights_sum != 0f32.as_() {
                    let recpeq = 1f32.as_() / weights_sum;

                    for (dst, src) in weights
                        .iter_mut()
                        .skip(filter_position)
                        .take(size)
                        .zip(local_filters.iter().take(size))
                    {
                        *dst = *src * recpeq;
                    }
                }

                filter_position += kernel_size;
            }

            FilterWeights::<T>::new(
                weights,
                kernel_size,
                kernel_size,
                out_size,
                filter_radius.as_(),
                bounds,
            )
        } else {
            // Two-tap "area" path: linear weights between the two nearest
            // source samples, derived from the fractional source position.
            let inv_scale: T = 1.as_() / scale;
            let kernel_size = 2;
            let filter_radius: T = 1.as_();
            let mut weights: Vec<T> = vec![T::default(); kernel_size * out_size];
            let mut local_filters = vec![T::default(); kernel_size];
            let mut filter_position = 0usize;

            for (i, bound) in bounds.iter_mut().enumerate() {
                let mut weights_sum: T = 0f32.as_();

                let sx: T = (i.as_() * scale).floor();
                // Fractional offset of the output pixel inside the source cell.
                let fx = (i as i64 + 1).as_() - (sx + 1.as_()) * inv_scale;
                let dx = if fx <= 0.as_() {
                    0.as_()
                } else {
                    fx - fx.floor()
                };
                let dx = dx.abs();
                let weight0 = 1.as_() - dx;
                let weight1: T = dx;
                local_filters[0] = weight0;
                local_filters[1] = weight1;

                let start: usize = sx.floor().max(0f32.as_()).as_();
                let end: usize = (sx + kernel_size.as_())
                    .ceil()
                    .min(start.as_() + kernel_size.as_())
                    .min(in_size.as_())
                    .as_();

                let size = end - start;

                weights_sum += weight0;
                // The second tap only exists when it is inside the image.
                if size > 1 {
                    weights_sum += weight1;
                }
                *bound = FilterBounds::new(start, size);

                if weights_sum != 0f32.as_() {
                    let recpeq = 1f32.as_() / weights_sum;

                    for (dst, src) in weights
                        .iter_mut()
                        .skip(filter_position)
                        .take(size)
                        .zip(local_filters.iter().take(size))
                    {
                        *dst = *src * recpeq;
                    }
                } else {
                    // Degenerate row: fall back to a pass-through tap.
                    weights[filter_position] = 1.as_();
                }

                filter_position += kernel_size;
            }

            FilterWeights::new(
                weights,
                kernel_size,
                kernel_size,
                out_size,
                filter_radius.as_(),
                bounds,
            )
        }
    }
673}
674
675impl Scaler {
    /// Generic resize without alpha handling.
    ///
    /// Validates both stores (dimension/overflow/bit-depth checks, in that
    /// order, since error precedence is observable), short-circuits the
    /// same-size copy and the nearest-neighbor fast path, then performs a
    /// vertical and/or horizontal convolution pass as needed.
    pub(crate) fn generic_resize<
        'a,
        T: Clone + Copy + Debug + Send + Sync + Default + 'static,
        const N: usize,
    >(
        &self,
        store: &ImageStore<'a, T, N>,
        into: &mut ImageStoreMut<'a, T, N>,
    ) -> Result<(), PicScaleError>
    where
        ImageStore<'a, T, N>: VerticalConvolutionPass<T, N> + HorizontalConvolutionPass<T, N>,
        ImageStoreMut<'a, T, N>: CheckStoreDensity,
    {
        let new_size = into.get_size();
        into.validate()?;
        store.validate()?;
        if store.width == 0 || store.height == 0 || new_size.width == 0 || new_size.height == 0 {
            return Err(PicScaleError::ZeroImageDimensions);
        }

        if check_image_size_overflow(store.width, store.height, store.channels) {
            return Err(PicScaleError::SourceImageIsTooLarge);
        }

        if check_image_size_overflow(new_size.width, new_size.height, store.channels) {
            return Err(PicScaleError::DestinationImageIsTooLarge);
        }

        if into.should_have_bit_depth() && !(1..=16).contains(&into.bit_depth) {
            return Err(PicScaleError::UnsupportedBitDepth(into.bit_depth));
        }

        // Identical dimensions: plain copy, no resampling.
        if store.width == new_size.width && store.height == new_size.height {
            store.copied_to_mut(into);
            return Ok(());
        }

        let nova_thread_pool = self
            .threading_policy
            .get_nova_pool(ImageSize::new(new_size.width, new_size.height));

        // Nearest-neighbor has no convolution; use the dedicated sampler.
        if self.function == ResamplingFunction::Nearest {
            resize_nearest::<T, N>(
                store.buffer.as_ref(),
                store.width,
                store.height,
                into.buffer.borrow_mut(),
                new_size.width,
                new_size.height,
                &nova_thread_pool,
            );
            return Ok(());
        }

        let should_do_horizontal = store.width != new_size.width;
        let should_do_vertical = store.height != new_size.height;
        assert!(should_do_horizontal || should_do_vertical);

        if should_do_vertical && should_do_horizontal {
            // Two-pass resize: vertical into a scratch image, then horizontal
            // into the destination.
            let mut target_vertical = vec![T::default(); store.width * new_size.height * N];

            let mut new_image_vertical = ImageStoreMut::<T, N>::from_slice(
                &mut target_vertical,
                store.width,
                new_size.height,
            )?;
            new_image_vertical.bit_depth = into.bit_depth;
            let vertical_filters = self.generate_weights(store.height, new_size.height);
            let options = ConvolutionOptions::new(self.workload_strategy);
            store.convolve_vertical(
                vertical_filters,
                &mut new_image_vertical,
                &nova_thread_pool,
                options,
            );

            // Wrap the scratch buffer as an immutable store for the second pass.
            let new_immutable_store = ImageStore::<T, N> {
                buffer: std::borrow::Cow::Owned(target_vertical),
                channels: N,
                width: store.width,
                height: new_size.height,
                stride: store.width * N,
                bit_depth: into.bit_depth,
            };
            let horizontal_filters = self.generate_weights(store.width, new_size.width);
            let options = ConvolutionOptions::new(self.workload_strategy);
            new_immutable_store.convolve_horizontal(
                horizontal_filters,
                into,
                &nova_thread_pool,
                options,
            );
            Ok(())
        } else if should_do_vertical {
            // Height-only change: single vertical pass.
            let vertical_filters = self.generate_weights(store.height, new_size.height);
            let options = ConvolutionOptions::new(self.workload_strategy);
            store.convolve_vertical(vertical_filters, into, &nova_thread_pool, options);
            Ok(())
        } else {
            // Width-only change: single horizontal pass.
            assert!(should_do_horizontal);
            let horizontal_filters = self.generate_weights(store.width, new_size.width);
            let options = ConvolutionOptions::new(self.workload_strategy);
            store.convolve_horizontal(horizontal_filters, into, &nova_thread_pool, options);
            Ok(())
        }
    }
782
783 fn forward_resize_with_alpha<
784 'a,
785 T: Clone + Copy + Debug + Send + Sync + Default + 'static,
786 const N: usize,
787 >(
788 &self,
789 store: &ImageStore<'a, T, N>,
790 into: &mut ImageStoreMut<'a, T, N>,
791 premultiply_alpha_requested: bool,
792 nova_thread_pool: &novtb::ThreadPool,
793 ) -> Result<(), PicScaleError>
794 where
795 ImageStore<'a, T, N>:
796 VerticalConvolutionPass<T, N> + HorizontalConvolutionPass<T, N> + AssociateAlpha<T, N>,
797 ImageStoreMut<'a, T, N>: CheckStoreDensity + UnassociateAlpha<T, N>,
798 {
799 let new_size = into.get_size();
800 let mut src_store: std::borrow::Cow<'_, ImageStore<'_, T, N>> =
801 std::borrow::Cow::Borrowed(store);
802
803 let mut has_alpha_premultiplied = true;
804
805 if premultiply_alpha_requested {
806 let is_alpha_premultiplication_reasonable =
807 src_store.is_alpha_premultiplication_needed();
808 if is_alpha_premultiplication_reasonable {
809 let mut target_premultiplied =
810 vec![T::default(); src_store.width * src_store.height * N];
811 let mut new_store = ImageStoreMut::<T, N>::from_slice(
812 &mut target_premultiplied,
813 src_store.width,
814 src_store.height,
815 )?;
816 new_store.bit_depth = into.bit_depth;
817 src_store.premultiply_alpha(&mut new_store, nova_thread_pool);
818 src_store = std::borrow::Cow::Owned(ImageStore::<T, N> {
819 buffer: std::borrow::Cow::Owned(target_premultiplied),
820 channels: N,
821 width: src_store.width,
822 height: src_store.height,
823 stride: src_store.width * N,
824 bit_depth: into.bit_depth,
825 });
826 has_alpha_premultiplied = true;
827 }
828 }
829
830 let mut target_vertical = vec![T::default(); src_store.width * new_size.height * N];
831
832 let mut new_image_vertical = ImageStoreMut::<T, N>::from_slice(
833 &mut target_vertical,
834 src_store.width,
835 new_size.height,
836 )?;
837 new_image_vertical.bit_depth = into.bit_depth;
838 let vertical_filters = self.generate_weights(src_store.height, new_size.height);
839 let options = ConvolutionOptions::new(self.workload_strategy);
840 src_store.convolve_vertical(
841 vertical_filters,
842 &mut new_image_vertical,
843 nova_thread_pool,
844 options,
845 );
846
847 let new_immutable_store = ImageStore::<T, N> {
848 buffer: std::borrow::Cow::Owned(target_vertical),
849 channels: N,
850 width: src_store.width,
851 height: new_size.height,
852 stride: src_store.width * N,
853 bit_depth: into.bit_depth,
854 };
855 let horizontal_filters = self.generate_weights(src_store.width, new_size.width);
856 let options = ConvolutionOptions::new(self.workload_strategy);
857 new_immutable_store.convolve_horizontal(
858 horizontal_filters,
859 into,
860 nova_thread_pool,
861 options,
862 );
863
864 if premultiply_alpha_requested && has_alpha_premultiplied {
865 into.unpremultiply_alpha(nova_thread_pool, self.workload_strategy);
866 }
867
868 Ok(())
869 }
870
871 fn forward_resize_vertical_with_alpha<
872 'a,
873 T: Clone + Copy + Debug + Send + Sync + Default + 'static,
874 const N: usize,
875 >(
876 &self,
877 store: &ImageStore<'a, T, N>,
878 into: &mut ImageStoreMut<'a, T, N>,
879 premultiply_alpha_requested: bool,
880 nova_thread_pool: &novtb::ThreadPool,
881 ) -> Result<(), PicScaleError>
882 where
883 ImageStore<'a, T, N>:
884 VerticalConvolutionPass<T, N> + HorizontalConvolutionPass<T, N> + AssociateAlpha<T, N>,
885 ImageStoreMut<'a, T, N>: CheckStoreDensity + UnassociateAlpha<T, N>,
886 {
887 let new_size = into.get_size();
888 let mut src_store = std::borrow::Cow::Borrowed(store);
889
890 let mut has_alpha_premultiplied = true;
891
892 if premultiply_alpha_requested {
893 let is_alpha_premultiplication_reasonable =
894 src_store.is_alpha_premultiplication_needed();
895 if is_alpha_premultiplication_reasonable {
896 let mut target_premultiplied =
897 vec![T::default(); src_store.width * src_store.height * N];
898 let mut new_store = ImageStoreMut::<T, N>::from_slice(
899 &mut target_premultiplied,
900 src_store.width,
901 src_store.height,
902 )?;
903 new_store.bit_depth = into.bit_depth;
904 src_store.premultiply_alpha(&mut new_store, nova_thread_pool);
905 src_store = std::borrow::Cow::Owned(ImageStore::<T, N> {
906 buffer: std::borrow::Cow::Owned(target_premultiplied),
907 channels: N,
908 width: src_store.width,
909 height: src_store.height,
910 stride: src_store.width * N,
911 bit_depth: into.bit_depth,
912 });
913 has_alpha_premultiplied = true;
914 }
915 }
916
917 let vertical_filters = self.generate_weights(src_store.height, new_size.height);
918 let options = ConvolutionOptions::new(self.workload_strategy);
919 src_store.convolve_vertical(vertical_filters, into, nova_thread_pool, options);
920
921 if premultiply_alpha_requested && has_alpha_premultiplied {
922 into.unpremultiply_alpha(nova_thread_pool, self.workload_strategy);
923 }
924
925 Ok(())
926 }
927
928 fn forward_resize_horizontal_with_alpha<
929 'a,
930 T: Clone + Copy + Debug + Send + Sync + Default + 'static,
931 const N: usize,
932 >(
933 &self,
934 store: &ImageStore<'a, T, N>,
935 into: &mut ImageStoreMut<'a, T, N>,
936 premultiply_alpha_requested: bool,
937 nova_thread_pool: &novtb::ThreadPool,
938 ) -> Result<(), PicScaleError>
939 where
940 ImageStore<'a, T, N>:
941 VerticalConvolutionPass<T, N> + HorizontalConvolutionPass<T, N> + AssociateAlpha<T, N>,
942 ImageStoreMut<'a, T, N>: CheckStoreDensity + UnassociateAlpha<T, N>,
943 {
944 let new_size = into.get_size();
945 let mut src_store = std::borrow::Cow::Borrowed(store);
946
947 let mut has_alpha_premultiplied = true;
948
949 if premultiply_alpha_requested {
950 let is_alpha_premultiplication_reasonable =
951 src_store.is_alpha_premultiplication_needed();
952 if is_alpha_premultiplication_reasonable {
953 let mut target_premultiplied =
954 vec![T::default(); src_store.width * src_store.height * N];
955 let mut new_store = ImageStoreMut::<T, N>::from_slice(
956 &mut target_premultiplied,
957 src_store.width,
958 src_store.height,
959 )?;
960 new_store.bit_depth = into.bit_depth;
961 src_store.premultiply_alpha(&mut new_store, nova_thread_pool);
962 src_store = std::borrow::Cow::Owned(ImageStore::<T, N> {
963 buffer: std::borrow::Cow::Owned(target_premultiplied),
964 channels: N,
965 width: src_store.width,
966 height: src_store.height,
967 stride: src_store.width * N,
968 bit_depth: into.bit_depth,
969 });
970 has_alpha_premultiplied = true;
971 }
972 }
973
974 let horizontal_filters = self.generate_weights(src_store.width, new_size.width);
975 let options = ConvolutionOptions::new(self.workload_strategy);
976 src_store.convolve_horizontal(horizontal_filters, into, nova_thread_pool, options);
977
978 if premultiply_alpha_requested && has_alpha_premultiplied {
979 into.unpremultiply_alpha(nova_thread_pool, self.workload_strategy);
980 }
981
982 Ok(())
983 }
984
    /// Generic resize with optional alpha pre-multiplication.
    ///
    /// Performs the same validation and fast paths as
    /// [`Self::generic_resize`] (error precedence preserved), then dispatches
    /// to the appropriate `forward_resize_*_with_alpha` helper depending on
    /// which dimensions change.
    pub(crate) fn generic_resize_with_alpha<
        'a,
        T: Clone + Copy + Debug + Send + Sync + Default + 'static,
        const N: usize,
    >(
        &self,
        store: &ImageStore<'a, T, N>,
        into: &mut ImageStoreMut<'a, T, N>,
        premultiply_alpha_requested: bool,
    ) -> Result<(), PicScaleError>
    where
        ImageStore<'a, T, N>:
            VerticalConvolutionPass<T, N> + HorizontalConvolutionPass<T, N> + AssociateAlpha<T, N>,
        ImageStoreMut<'a, T, N>: CheckStoreDensity + UnassociateAlpha<T, N>,
    {
        let new_size = into.get_size();
        into.validate()?;
        store.validate()?;
        if store.width == 0 || store.height == 0 || new_size.width == 0 || new_size.height == 0 {
            return Err(PicScaleError::ZeroImageDimensions);
        }

        if check_image_size_overflow(store.width, store.height, store.channels) {
            return Err(PicScaleError::SourceImageIsTooLarge);
        }

        if check_image_size_overflow(new_size.width, new_size.height, store.channels) {
            return Err(PicScaleError::DestinationImageIsTooLarge);
        }

        if into.should_have_bit_depth() && !(1..=16).contains(&into.bit_depth) {
            return Err(PicScaleError::UnsupportedBitDepth(into.bit_depth));
        }

        // Identical dimensions: plain copy, alpha untouched.
        if store.width == new_size.width && store.height == new_size.height {
            store.copied_to_mut(into);
            return Ok(());
        }

        let nova_thread_pool = self
            .threading_policy
            .get_nova_pool(ImageSize::new(new_size.width, new_size.height));

        // Nearest-neighbor picks whole pixels, so no alpha handling is needed.
        if self.function == ResamplingFunction::Nearest {
            resize_nearest::<T, N>(
                store.buffer.as_ref(),
                store.width,
                store.height,
                into.buffer.borrow_mut(),
                new_size.width,
                new_size.height,
                &nova_thread_pool,
            );
            return Ok(());
        }

        let should_do_horizontal = store.width != new_size.width;
        let should_do_vertical = store.height != new_size.height;
        assert!(should_do_horizontal || should_do_vertical);

        if should_do_vertical && should_do_horizontal {
            self.forward_resize_with_alpha(
                store,
                into,
                premultiply_alpha_requested,
                &nova_thread_pool,
            )
        } else if should_do_vertical {
            self.forward_resize_vertical_with_alpha(
                store,
                into,
                premultiply_alpha_requested,
                &nova_thread_pool,
            )
        } else {
            assert!(should_do_horizontal);
            self.forward_resize_horizontal_with_alpha(
                store,
                into,
                premultiply_alpha_requested,
                &nova_thread_pool,
            )
        }
    }
1069}
1070
// 8-bit resizing: thin delegation to the generic engine; only the alpha-bearing
// layouts (gray+alpha, RGBA) route through the alpha-aware path.
impl Scaling for Scaler {
    fn set_threading_policy(&mut self, threading_policy: ThreadingPolicy) {
        self.threading_policy = threading_policy;
    }

    fn resize_plane<'a>(
        &'a self,
        store: &ImageStore<'a, u8, 1>,
        into: &mut ImageStoreMut<'a, u8, 1>,
    ) -> Result<(), PicScaleError> {
        self.generic_resize(store, into)
    }

    fn resize_cbcr8<'a>(
        &'a self,
        store: &ImageStore<'a, u8, 2>,
        into: &mut ImageStoreMut<'a, u8, 2>,
    ) -> Result<(), PicScaleError> {
        self.generic_resize(store, into)
    }

    fn resize_gray_alpha<'a>(
        &'a self,
        store: &ImageStore<'a, u8, 2>,
        into: &mut ImageStoreMut<'a, u8, 2>,
        premultiply_alpha: bool,
    ) -> Result<(), PicScaleError> {
        self.generic_resize_with_alpha(store, into, premultiply_alpha)
    }

    fn resize_rgb<'a>(
        &'a self,
        store: &ImageStore<'a, u8, 3>,
        into: &mut ImageStoreMut<'a, u8, 3>,
    ) -> Result<(), PicScaleError> {
        self.generic_resize(store, into)
    }

    fn resize_rgba<'a>(
        &'a self,
        store: &ImageStore<'a, u8, 4>,
        into: &mut ImageStoreMut<'a, u8, 4>,
        premultiply_alpha: bool,
    ) -> Result<(), PicScaleError> {
        self.generic_resize_with_alpha(store, into, premultiply_alpha)
    }
}
1118
// f32 resizing: same delegation pattern as the 8-bit impl.
impl ScalingF32 for Scaler {
    fn resize_plane_f32<'a>(
        &'a self,
        store: &ImageStore<'a, f32, 1>,
        into: &mut ImageStoreMut<'a, f32, 1>,
    ) -> Result<(), PicScaleError> {
        self.generic_resize(store, into)
    }

    fn resize_cbcr_f32<'a>(
        &'a self,
        store: &ImageStore<'a, f32, 2>,
        into: &mut ImageStoreMut<'a, f32, 2>,
    ) -> Result<(), PicScaleError> {
        self.generic_resize(store, into)
    }

    fn resize_gray_alpha_f32<'a>(
        &'a self,
        store: &ImageStore<'a, f32, 2>,
        into: &mut ImageStoreMut<'a, f32, 2>,
        premultiply_alpha: bool,
    ) -> Result<(), PicScaleError> {
        self.generic_resize_with_alpha(store, into, premultiply_alpha)
    }

    fn resize_rgb_f32<'a>(
        &'a self,
        store: &ImageStore<'a, f32, 3>,
        into: &mut ImageStoreMut<'a, f32, 3>,
    ) -> Result<(), PicScaleError> {
        self.generic_resize(store, into)
    }

    fn resize_rgba_f32<'a>(
        &'a self,
        store: &ImageStore<'a, f32, 4>,
        into: &mut ImageStoreMut<'a, f32, 4>,
        premultiply_alpha: bool,
    ) -> Result<(), PicScaleError> {
        self.generic_resize_with_alpha(store, into, premultiply_alpha)
    }
}
1162
// u16 resizing: same delegation pattern as the 8-bit impl. Method order here
// differs from the trait declaration order, which is fine for trait impls.
impl ScalingU16 for Scaler {
    fn resize_rgb_u16<'a>(
        &'a self,
        store: &ImageStore<'a, u16, 3>,
        into: &mut ImageStoreMut<'a, u16, 3>,
    ) -> Result<(), PicScaleError> {
        self.generic_resize(store, into)
    }

    fn resize_cbcr_u16<'a>(
        &'a self,
        store: &ImageStore<'a, u16, 2>,
        into: &mut ImageStoreMut<'a, u16, 2>,
    ) -> Result<(), PicScaleError> {
        self.generic_resize(store, into)
    }

    fn resize_gray_alpha16<'a>(
        &'a self,
        store: &ImageStore<'a, u16, 2>,
        into: &mut ImageStoreMut<'a, u16, 2>,
        premultiply_alpha: bool,
    ) -> Result<(), PicScaleError> {
        self.generic_resize_with_alpha(store, into, premultiply_alpha)
    }

    fn resize_rgba_u16<'a>(
        &'a self,
        store: &ImageStore<'a, u16, 4>,
        into: &mut ImageStoreMut<'a, u16, 4>,
        premultiply_alpha: bool,
    ) -> Result<(), PicScaleError> {
        self.generic_resize_with_alpha(store, into, premultiply_alpha)
    }

    fn resize_plane_u16<'a>(
        &'a self,
        store: &ImageStore<'a, u16, 1>,
        into: &mut ImageStoreMut<'a, u16, 1>,
    ) -> Result<(), PicScaleError> {
        self.generic_resize(store, into)
    }
}
1275
// Packed 30-bit (10-bit-per-channel) formats. The packed layout and byte order
// are const generics of `resize_ar30_impl`, so each runtime `order` value is
// dispatched to a distinct monomorphization here.
impl Scaler {
    /// Resizes a packed AR30 image (see [`Rgb30::Ar30`]).
    ///
    /// `order` selects host or network byte order of the packed 32-bit words.
    pub fn resize_ar30(
        &self,
        src_image: &ImageStore<u8, 4>,
        dst_image: &mut ImageStoreMut<u8, 4>,
        order: Ar30ByteOrder,
    ) -> Result<(), PicScaleError> {
        src_image.validate()?;
        dst_image.validate()?;
        let dst_size = dst_image.get_size();
        let dst_stride = dst_image.stride();
        match order {
            Ar30ByteOrder::Host => {
                resize_ar30_impl::<{ Rgb30::Ar30 as usize }, { Ar30ByteOrder::Host as usize }>(
                    src_image.as_bytes(),
                    src_image.stride,
                    src_image.get_size(),
                    dst_image.buffer.borrow_mut(),
                    dst_stride,
                    dst_size,
                    self,
                )
            }
            Ar30ByteOrder::Network => {
                resize_ar30_impl::<{ Rgb30::Ar30 as usize }, { Ar30ByteOrder::Network as usize }>(
                    src_image.as_bytes(),
                    src_image.stride,
                    src_image.get_size(),
                    dst_image.buffer.borrow_mut(),
                    dst_stride,
                    dst_size,
                    self,
                )
            }
        }
    }

    /// Resizes a packed RA30 image (see [`Rgb30::Ra30`]).
    ///
    /// `order` selects host or network byte order of the packed 32-bit words.
    pub fn resize_ra30(
        &self,
        src_image: &ImageStore<u8, 4>,
        dst_image: &mut ImageStoreMut<u8, 4>,
        order: Ar30ByteOrder,
    ) -> Result<(), PicScaleError> {
        src_image.validate()?;
        dst_image.validate()?;
        let dst_size = dst_image.get_size();
        let dst_stride = dst_image.stride();
        match order {
            Ar30ByteOrder::Host => {
                resize_ar30_impl::<{ Rgb30::Ra30 as usize }, { Ar30ByteOrder::Host as usize }>(
                    src_image.as_bytes(),
                    src_image.stride,
                    src_image.get_size(),
                    dst_image.buffer.borrow_mut(),
                    dst_stride,
                    dst_size,
                    self,
                )
            }
            Ar30ByteOrder::Network => {
                resize_ar30_impl::<{ Rgb30::Ra30 as usize }, { Ar30ByteOrder::Network as usize }>(
                    src_image.as_bytes(),
                    src_image.stride,
                    src_image.get_size(),
                    dst_image.buffer.borrow_mut(),
                    dst_stride,
                    dst_size,
                    self,
                )
            }
        }
    }
}
1366
/// Options bundle for the [`ImageStoreScaling`] convenience trait.
#[derive(Debug, Clone, Copy, Ord, PartialOrd, Eq, PartialEq, Default)]
pub struct ScalingOptions {
    /// Resampling kernel to use.
    pub resampling_function: ResamplingFunction,
    /// Whether to pre-multiply alpha before scaling (alpha-bearing stores only).
    pub premultiply_alpha: bool,
    /// Threading policy for the resize.
    pub threading_policy: ThreadingPolicy,
}
1374
/// Convenience trait: resize an image store directly into `store` using a
/// one-shot [`Scaler`] built from `options`.
pub trait ImageStoreScaling<'b, T, const N: usize>
where
    T: Clone + Copy + Debug,
{
    /// Scales `self` into `store` with the given options.
    fn scale(
        &self,
        store: &mut ImageStoreMut<'b, T, N>,
        options: ScalingOptions,
    ) -> Result<(), PicScaleError>;
}
1386
/// Implements [`ImageStoreScaling`] for an alpha-bearing store type by
/// delegating to [`Scaler::generic_resize_with_alpha`].
macro_rules! def_image_scaling_alpha {
    ($clazz: ident, $fx_type: ident, $cn: expr) => {
        impl<'b> ImageStoreScaling<'b, $fx_type, $cn> for $clazz<'b> {
            fn scale(
                &self,
                store: &mut ImageStoreMut<'b, $fx_type, $cn>,
                options: ScalingOptions,
            ) -> Result<(), PicScaleError> {
                // A fresh scaler is built per call from the supplied options.
                let mut scaler = Scaler::new(options.resampling_function);
                scaler.set_threading_policy(options.threading_policy);
                scaler.generic_resize_with_alpha(self, store, options.premultiply_alpha)
            }
        }
    };
}
1402
/// Implements [`ImageStoreScaling`] for an alpha-free store type by delegating
/// to [`Scaler::generic_resize`] (the `premultiply_alpha` option is ignored).
macro_rules! def_image_scaling {
    ($clazz: ident, $fx_type: ident, $cn: expr) => {
        impl<'b> ImageStoreScaling<'b, $fx_type, $cn> for $clazz<'b> {
            fn scale(
                &self,
                store: &mut ImageStoreMut<'b, $fx_type, $cn>,
                options: ScalingOptions,
            ) -> Result<(), PicScaleError> {
                // A fresh scaler is built per call from the supplied options.
                let mut scaler = Scaler::new(options.resampling_function);
                scaler.set_threading_policy(options.threading_policy);
                scaler.generic_resize(self, store)
            }
        }
    };
}
1418
// `ImageStoreScaling` implementations for every supported store type.
// Four-channel (RGBA-like) stores use the alpha-aware path; all others use the
// plain path.
def_image_scaling_alpha!(Rgba8ImageStore, u8, 4);
def_image_scaling!(Rgb8ImageStore, u8, 3);
def_image_scaling!(CbCr8ImageStore, u8, 2);
def_image_scaling!(Planar8ImageStore, u8, 1);
def_image_scaling!(Planar16ImageStore, u16, 1);
def_image_scaling!(CbCr16ImageStore, u16, 2);
def_image_scaling!(Rgb16ImageStore, u16, 3);
def_image_scaling_alpha!(Rgba16ImageStore, u16, 4);
def_image_scaling!(PlanarF32ImageStore, f32, 1);
def_image_scaling!(CbCrF32ImageStore, f32, 2);
def_image_scaling!(RgbF32ImageStore, f32, 3);
def_image_scaling_alpha!(RgbaF32ImageStore, f32, 4);
1431
1432#[cfg(test)]
1433mod tests {
1434 use super::*;
1435
    /// Asserts every RGBA8 pixel of `$dst` is within `$max` of the constant
    /// fill color (124, 41, 99, 77) used by the tests below.
    macro_rules! check_rgba8 {
        ($dst: expr, $image_width: expr, $max: expr) => {
            {
                for (y, row) in $dst.chunks_exact($image_width * 4).enumerate() {
                    for (i, dst) in row.chunks_exact(4).enumerate() {
                        let diff0 = (dst[0] as i32 - 124).abs();
                        let diff1 = (dst[1] as i32 - 41).abs();
                        let diff2 = (dst[2] as i32 - 99).abs();
                        let diff3 = (dst[3] as i32 - 77).abs();
                        assert!(
                            diff0 < $max,
                            "Diff for channel 0 is expected < {}, but it was {diff0}, at (y: {y}, x: {i})",
                            $max
                        );
                        assert!(
                            diff1 < $max,
                            "Diff for channel 1 is expected < {}, but it was {diff1}, at (y: {y}, x: {i})",
                            $max
                        );
                        assert!(
                            diff2 < $max,
                            "Diff for channel 2 is expected < {}, but it was {diff2}, at (y: {y}, x: {i})",
                            $max
                        );
                        assert!(
                            diff3 < $max,
                            "Diff for channel 3 is expected < {}, but it was {diff3}, at (y: {y}, x: {i})",
                            $max
                        );
                    }
                }
            }
        };
    }
1470
    /// Asserts every RGB16 pixel of `$dst` is within `$max` of the constant
    /// fill color (124, 41, 99) used by the tests below.
    macro_rules! check_rgb16 {
        ($dst: expr, $image_width: expr, $max: expr) => {
            {
                for (y, row) in $dst.chunks_exact($image_width * 3).enumerate() {
                    for (i, dst) in row.chunks_exact(3).enumerate() {
                        let diff0 = (dst[0] as i32 - 124).abs();
                        let diff1 = (dst[1] as i32 - 41).abs();
                        let diff2 = (dst[2] as i32 - 99).abs();
                        assert!(
                            diff0 < $max,
                            "Diff for channel 0 is expected < {}, but it was {diff0}, at (y: {y}, x: {i})",
                            $max
                        );
                        assert!(
                            diff1 < $max,
                            "Diff for channel 1 is expected < {}, but it was {diff1}, at (y: {y}, x: {i})",
                            $max
                        );
                        assert!(
                            diff2 < $max,
                            "Diff for channel 2 is expected < {}, but it was {diff2}, at (y: {y}, x: {i})",
                            $max
                        );
                    }
                }
            }
        };
    }
1499
1500 #[test]
1501 fn check_rgba8_resizing_vertical() {
1502 let image_width = 255;
1503 let image_height = 512;
1504 const CN: usize = 4;
1505 let mut image = vec![0u8; image_height * image_width * CN];
1506 for dst in image.chunks_exact_mut(4) {
1507 dst[0] = 124;
1508 dst[1] = 41;
1509 dst[2] = 99;
1510 dst[3] = 77;
1511 }
1512 let mut scaler = Scaler::new(ResamplingFunction::Bilinear);
1513 scaler.set_threading_policy(ThreadingPolicy::Single);
1514 let src_store = ImageStore::from_slice(&image, image_width, image_height).unwrap();
1515 let mut target_store = ImageStoreMut::alloc(image_width, image_height / 2);
1516 scaler
1517 .resize_rgba(&src_store, &mut target_store, false)
1518 .unwrap();
1519 let target_data = target_store.buffer.borrow();
1520 check_rgba8!(target_data, image_width, 34);
1521 }
1522
1523 #[test]
1524 fn check_rgba8_resizing_both() {
1525 let image_width = 255;
1526 let image_height = 512;
1527 const CN: usize = 4;
1528 let mut image = vec![0u8; image_height * image_width * CN];
1529 for dst in image.chunks_exact_mut(4) {
1530 dst[0] = 124;
1531 dst[1] = 41;
1532 dst[2] = 99;
1533 dst[3] = 77;
1534 }
1535 image[3] = 78;
1536 let mut scaler = Scaler::new(ResamplingFunction::Bilinear);
1537 scaler.set_threading_policy(ThreadingPolicy::Single);
1538 let src_store = ImageStore::from_slice(&image, image_width, image_height).unwrap();
1539 let mut target_store = ImageStoreMut::alloc(image_width / 2, image_height / 2);
1540 scaler
1541 .resize_rgba(&src_store, &mut target_store, false)
1542 .unwrap();
1543 let target_data = target_store.buffer.borrow();
1544 check_rgba8!(target_data, image_width, 34);
1545 }
1546
1547 #[test]
1548 fn check_rgba8_resizing_alpha() {
1549 let image_width = 255;
1550 let image_height = 512;
1551 const CN: usize = 4;
1552 let mut image = vec![0u8; image_height * image_width * CN];
1553 for dst in image.chunks_exact_mut(4) {
1554 dst[0] = 124;
1555 dst[1] = 41;
1556 dst[2] = 99;
1557 dst[3] = 77;
1558 }
1559 image[3] = 78;
1560 let mut scaler = Scaler::new(ResamplingFunction::Lanczos3);
1561 scaler.set_threading_policy(ThreadingPolicy::Single);
1562 let src_store = ImageStore::from_slice(&image, image_width, image_height).unwrap();
1563 let mut target_store = ImageStoreMut::alloc(image_width / 2, image_height / 2);
1564 scaler
1565 .resize_rgba(&src_store, &mut target_store, true)
1566 .unwrap();
1567 let target_data = target_store.buffer.borrow();
1568 check_rgba8!(target_data, image_width, 126);
1569 }
1570
1571 #[test]
1572 fn check_rgb8_resizing_vertical() {
1573 let image_width = 255;
1574 let image_height = 512;
1575 const CN: usize = 3;
1576 let mut image = vec![0u8; image_height * image_width * CN];
1577 for dst in image.chunks_exact_mut(3) {
1578 dst[0] = 124;
1579 dst[1] = 41;
1580 dst[2] = 99;
1581 }
1582 let mut scaler = Scaler::new(ResamplingFunction::Bilinear);
1583 scaler.set_threading_policy(ThreadingPolicy::Single);
1584 let src_store = ImageStore::from_slice(&image, image_width, image_height).unwrap();
1585 let mut target_store = ImageStoreMut::alloc(image_width, image_height / 2);
1586 scaler.resize_rgb(&src_store, &mut target_store).unwrap();
1587 let target_data = target_store.buffer.borrow();
1588
1589 check_rgb16!(target_data, image_width, 85);
1590 }
1591
1592 #[test]
1593 fn check_rgb8_resizing_vertical_threading() {
1594 let image_width = 255;
1595 let image_height = 512;
1596 const CN: usize = 3;
1597 let mut image = vec![0u8; image_height * image_width * CN];
1598 for dst in image.chunks_exact_mut(3) {
1599 dst[0] = 124;
1600 dst[1] = 41;
1601 dst[2] = 99;
1602 }
1603 let mut scaler = Scaler::new(ResamplingFunction::Bilinear);
1604 scaler.set_threading_policy(ThreadingPolicy::Adaptive);
1605 let src_store = ImageStore::from_slice(&image, image_width, image_height).unwrap();
1606 let mut target_store = ImageStoreMut::alloc(image_width, image_height / 2);
1607 scaler.resize_rgb(&src_store, &mut target_store).unwrap();
1608 let target_data = target_store.buffer.borrow();
1609
1610 check_rgb16!(target_data, image_width, 85);
1611 }
1612
1613 #[test]
1614 fn check_rgba10_resizing_vertical() {
1615 let image_width = 8;
1616 let image_height = 8;
1617 const CN: usize = 4;
1618 let mut image = vec![0u16; image_height * image_width * CN];
1619 for dst in image.chunks_exact_mut(4) {
1620 dst[0] = 124;
1621 dst[1] = 41;
1622 dst[2] = 99;
1623 dst[3] = 77;
1624 }
1625 image[3] = 78;
1626 let mut scaler = Scaler::new(ResamplingFunction::Lanczos3);
1627 scaler.set_threading_policy(ThreadingPolicy::Single);
1628 let mut src_store = ImageStore::from_slice(&image, image_width, image_height).unwrap();
1629 src_store.bit_depth = 10;
1630 let mut target_store = ImageStoreMut::alloc_with_depth(image_width, image_height / 2, 10);
1631 scaler
1632 .resize_rgba_u16(&src_store, &mut target_store, false)
1633 .unwrap();
1634 let target_data = target_store.buffer.borrow();
1635
1636 check_rgba8!(target_data, image_width, 60);
1637 }
1638
1639 #[test]
1640 fn check_rgb10_resizing_vertical() {
1641 let image_width = 8;
1642 let image_height = 4;
1643 const CN: usize = 3;
1644 let mut image = vec![0; image_height * image_width * CN];
1645 for dst in image.chunks_exact_mut(3) {
1646 dst[0] = 124;
1647 dst[1] = 41;
1648 dst[2] = 99;
1649 }
1650 let mut scaler = Scaler::new(ResamplingFunction::Lanczos3);
1651 scaler.set_threading_policy(ThreadingPolicy::Single);
1652 let mut src_store = ImageStore::from_slice(&image, image_width, image_height).unwrap();
1653 src_store.bit_depth = 10;
1654 let mut target_store = ImageStoreMut::alloc_with_depth(image_width, image_height / 2, 10);
1655 scaler
1656 .resize_rgb_u16(&src_store, &mut target_store)
1657 .unwrap();
1658 let target_data = target_store.buffer.borrow();
1659
1660 check_rgb16!(target_data, image_width, 85);
1661 }
1662
1663 #[test]
1664 fn check_rgb10_resizing_vertical_adaptive() {
1665 let image_width = 8;
1666 let image_height = 4;
1667 const CN: usize = 3;
1668 let mut image = vec![0; image_height * image_width * CN];
1669 for dst in image.chunks_exact_mut(3) {
1670 dst[0] = 124;
1671 dst[1] = 41;
1672 dst[2] = 99;
1673 }
1674 let mut scaler = Scaler::new(ResamplingFunction::Lanczos3);
1675 scaler.set_threading_policy(ThreadingPolicy::Adaptive);
1676 let mut src_store = ImageStore::from_slice(&image, image_width, image_height).unwrap();
1677 src_store.bit_depth = 10;
1678 let mut target_store = ImageStoreMut::alloc_with_depth(image_width, image_height / 2, 10);
1679 scaler
1680 .resize_rgb_u16(&src_store, &mut target_store)
1681 .unwrap();
1682 let target_data = target_store.buffer.borrow();
1683
1684 check_rgb16!(target_data, image_width, 85);
1685 }
1686
1687 #[test]
1688 fn check_rgb16_resizing_vertical() {
1689 let image_width = 8;
1690 let image_height = 8;
1691 const CN: usize = 3;
1692 let mut image = vec![164; image_height * image_width * CN];
1693 for dst in image.chunks_exact_mut(3) {
1694 dst[0] = 124;
1695 dst[1] = 41;
1696 dst[2] = 99;
1697 }
1698 let mut scaler = Scaler::new(ResamplingFunction::Lanczos3);
1699 scaler.set_threading_policy(ThreadingPolicy::Single);
1700 let mut src_store = ImageStore::from_slice(&image, image_width, image_height).unwrap();
1701 src_store.bit_depth = 10;
1702 let mut target_store = ImageStoreMut::alloc_with_depth(image_width, image_height / 2, 16);
1703 scaler
1704 .resize_rgb_u16(&src_store, &mut target_store)
1705 .unwrap();
1706 let target_data = target_store.buffer.borrow();
1707
1708 check_rgb16!(target_data, image_width, 100);
1709 }
1710
1711 #[test]
1712 fn check_rgba16_resizing_vertical() {
1713 let image_width = 8;
1714 let image_height = 8;
1715 const CN: usize = 4;
1716 let mut image = vec![0u16; image_height * image_width * CN];
1717 for dst in image.chunks_exact_mut(4) {
1718 dst[0] = 124;
1719 dst[1] = 41;
1720 dst[2] = 99;
1721 dst[3] = 255;
1722 }
1723 let mut scaler = Scaler::new(ResamplingFunction::Lanczos3);
1724 scaler.set_threading_policy(ThreadingPolicy::Single);
1725 let mut src_store = ImageStore::from_slice(&image, image_width, image_height).unwrap();
1726 src_store.bit_depth = 10;
1727 let mut target_store = ImageStoreMut::alloc_with_depth(image_width, image_height / 2, 16);
1728 scaler
1729 .resize_rgba_u16(&src_store, &mut target_store, false)
1730 .unwrap();
1731 let target_data = target_store.buffer.borrow();
1732
1733 check_rgba8!(target_data, image_width, 180);
1734 }
1735
1736 #[test]
1737 fn check_rgba16_resizing_vertical_threading() {
1738 let image_width = 8;
1739 let image_height = 8;
1740 const CN: usize = 4;
1741 let mut image = vec![0u16; image_height * image_width * CN];
1742 for dst in image.chunks_exact_mut(4) {
1743 dst[0] = 124;
1744 dst[1] = 41;
1745 dst[2] = 99;
1746 dst[3] = 255;
1747 }
1748 let mut scaler = Scaler::new(ResamplingFunction::Lanczos3);
1749 scaler.set_threading_policy(ThreadingPolicy::Adaptive);
1750 let mut src_store = ImageStore::from_slice(&image, image_width, image_height).unwrap();
1751 src_store.bit_depth = 10;
1752 let mut target_store = ImageStoreMut::alloc_with_depth(image_width, image_height / 2, 16);
1753 scaler
1754 .resize_rgba_u16(&src_store, &mut target_store, false)
1755 .unwrap();
1756 let target_data = target_store.buffer.borrow();
1757
1758 check_rgba8!(target_data, image_width, 180);
1759 }
1760
1761 #[test]
1762 fn check_rgba8_nearest_vertical() {
1763 let image_width = 255;
1764 let image_height = 512;
1765 const CN: usize = 4;
1766 let mut image = vec![0u8; image_height * image_width * CN];
1767 for dst in image.chunks_exact_mut(4) {
1768 dst[0] = 124;
1769 dst[1] = 41;
1770 dst[2] = 99;
1771 dst[3] = 77;
1772 }
1773 let mut scaler = Scaler::new(ResamplingFunction::Nearest);
1774 scaler.set_threading_policy(ThreadingPolicy::Single);
1775 let src_store = ImageStore::from_slice(&image, image_width, image_height).unwrap();
1776 let mut target_store = ImageStoreMut::alloc(image_width, image_height / 2);
1777 scaler
1778 .resize_rgba(&src_store, &mut target_store, false)
1779 .unwrap();
1780 let target_data = target_store.buffer.borrow();
1781
1782 check_rgba8!(target_data, image_width, 80);
1783 }
1784
1785 #[test]
1786 fn check_rgba8_nearest_vertical_threading() {
1787 let image_width = 255;
1788 let image_height = 512;
1789 const CN: usize = 4;
1790 let mut image = vec![0u8; image_height * image_width * CN];
1791 for dst in image.chunks_exact_mut(4) {
1792 dst[0] = 124;
1793 dst[1] = 41;
1794 dst[2] = 99;
1795 dst[3] = 77;
1796 }
1797 let mut scaler = Scaler::new(ResamplingFunction::Nearest);
1798 scaler.set_threading_policy(ThreadingPolicy::Adaptive);
1799 let src_store = ImageStore::from_slice(&image, image_width, image_height).unwrap();
1800 let mut target_store = ImageStoreMut::alloc(image_width, image_height / 2);
1801 scaler
1802 .resize_rgba(&src_store, &mut target_store, false)
1803 .unwrap();
1804 let target_data = target_store.buffer.borrow();
1805
1806 check_rgba8!(target_data, image_width, 80);
1807 }
1808}