1#![forbid(unsafe_code)]
30use crate::ar30::{Ar30ByteOrder, Rgb30};
31use crate::convolution::{ConvolutionOptions, HorizontalConvolutionPass, VerticalConvolutionPass};
32use crate::filter_weights::{FilterBounds, FilterWeights};
33use crate::image_size::ImageSize;
34use crate::image_store::{
35 AssociateAlpha, CheckStoreDensity, ImageStore, ImageStoreMut, UnassociateAlpha,
36};
37use crate::math::gaussian::Exponential;
38use crate::math::sinc::{Sinc, Trigonometry};
39use crate::nearest_sampler::resize_nearest;
40use crate::pic_scale_error::PicScaleError;
41use crate::resize_ar30::resize_ar30_impl;
42use crate::support::check_image_size_overflow;
43use crate::threading_policy::ThreadingPolicy;
44use crate::{
45 CbCr8ImageStore, CbCr16ImageStore, CbCrF32ImageStore, ConstPI, ConstSqrt2, Jinc,
46 Planar8ImageStore, Planar16ImageStore, PlanarF32ImageStore, ResamplingFunction, Rgb8ImageStore,
47 Rgb16ImageStore, RgbF32ImageStore, Rgba8ImageStore, Rgba16ImageStore, RgbaF32ImageStore,
48};
49use num_traits::{AsPrimitive, Float, Signed};
50use std::fmt::Debug;
51use std::ops::{AddAssign, MulAssign, Neg};
52
/// Image resizer.
///
/// Bundles the resampling function used to build convolution weights, the
/// threading policy, and the quality/speed preference that every `resize_*`
/// call on this value will use.
#[derive(Debug, Copy, Clone)]
pub struct Scaler {
    /// Resampling kernel used when generating filter weights.
    pub(crate) function: ResamplingFunction,
    /// Controls whether work is executed single-threaded or on a pool.
    pub(crate) threading_policy: ThreadingPolicy,
    /// Quality vs. speed preference forwarded to the convolution passes.
    pub workload_strategy: WorkloadStrategy,
}
60
/// 8-bit resizing entry points.
pub trait Scaling {
    /// Sets the threading policy used by subsequent resize calls.
    fn set_threading_policy(&mut self, threading_policy: ThreadingPolicy);

    /// Resizes a planar (single-channel) 8-bit image into `into`.
    fn resize_plane<'a>(
        &'a self,
        store: &ImageStore<'a, u8, 1>,
        into: &mut ImageStoreMut<'a, u8, 1>,
    ) -> Result<(), PicScaleError>;

    /// Resizes a two-channel (CbCr-style) 8-bit image into `into`.
    fn resize_cbcr8<'a>(
        &'a self,
        store: &ImageStore<'a, u8, 2>,
        into: &mut ImageStoreMut<'a, u8, 2>,
    ) -> Result<(), PicScaleError>;

    /// Resizes a gray+alpha 8-bit image into `into`.
    ///
    /// When `premultiply_alpha` is true, alpha is premultiplied before the
    /// convolution and unpremultiplied afterwards.
    fn resize_gray_alpha<'a>(
        &'a self,
        store: &ImageStore<'a, u8, 2>,
        into: &mut ImageStoreMut<'a, u8, 2>,
        premultiply_alpha: bool,
    ) -> Result<(), PicScaleError>;

    /// Resizes a three-channel (RGB) 8-bit image into `into`.
    fn resize_rgb<'a>(
        &'a self,
        store: &ImageStore<'a, u8, 3>,
        into: &mut ImageStoreMut<'a, u8, 3>,
    ) -> Result<(), PicScaleError>;

    /// Resizes a four-channel (RGBA) 8-bit image into `into`.
    ///
    /// When `premultiply_alpha` is true, alpha is premultiplied before the
    /// convolution and unpremultiplied afterwards.
    fn resize_rgba<'a>(
        &'a self,
        store: &ImageStore<'a, u8, 4>,
        into: &mut ImageStoreMut<'a, u8, 4>,
        premultiply_alpha: bool,
    ) -> Result<(), PicScaleError>;
}
176
/// `f32` resizing entry points; mirrors [`Scaling`] for floating-point stores.
pub trait ScalingF32 {
    /// Resizes a planar (single-channel) `f32` image into `into`.
    fn resize_plane_f32<'a>(
        &'a self,
        store: &ImageStore<'a, f32, 1>,
        into: &mut ImageStoreMut<'a, f32, 1>,
    ) -> Result<(), PicScaleError>;

    /// Resizes a two-channel (CbCr-style) `f32` image into `into`.
    fn resize_cbcr_f32<'a>(
        &'a self,
        store: &ImageStore<'a, f32, 2>,
        into: &mut ImageStoreMut<'a, f32, 2>,
    ) -> Result<(), PicScaleError>;

    /// Resizes a gray+alpha `f32` image into `into`, optionally handling
    /// alpha premultiplication around the convolution.
    fn resize_gray_alpha_f32<'a>(
        &'a self,
        store: &ImageStore<'a, f32, 2>,
        into: &mut ImageStoreMut<'a, f32, 2>,
        premultiply_alpha: bool,
    ) -> Result<(), PicScaleError>;

    /// Resizes a three-channel (RGB) `f32` image into `into`.
    fn resize_rgb_f32<'a>(
        &'a self,
        store: &ImageStore<'a, f32, 3>,
        into: &mut ImageStoreMut<'a, f32, 3>,
    ) -> Result<(), PicScaleError>;

    /// Resizes a four-channel (RGBA) `f32` image into `into`, optionally
    /// handling alpha premultiplication around the convolution.
    fn resize_rgba_f32<'a>(
        &'a self,
        store: &ImageStore<'a, f32, 4>,
        into: &mut ImageStoreMut<'a, f32, 4>,
        premultiply_alpha: bool,
    ) -> Result<(), PicScaleError>;
}
280
/// Trade-off hint forwarded to the convolution passes.
#[derive(Debug, Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Default)]
pub enum WorkloadStrategy {
    /// Prefer higher output quality over raw throughput.
    PreferQuality,
    /// Prefer throughput; this is the default strategy.
    #[default]
    PreferSpeed,
}
290
/// `u16` resizing entry points; mirrors [`Scaling`] for 1–16 bit-depth stores.
pub trait ScalingU16 {
    /// Resizes a planar (single-channel) `u16` image into `into`.
    fn resize_plane_u16<'a>(
        &'a self,
        store: &ImageStore<'a, u16, 1>,
        into: &mut ImageStoreMut<'a, u16, 1>,
    ) -> Result<(), PicScaleError>;

    /// Resizes a two-channel (CbCr-style) `u16` image into `into`.
    fn resize_cbcr_u16<'a>(
        &'a self,
        store: &ImageStore<'a, u16, 2>,
        into: &mut ImageStoreMut<'a, u16, 2>,
    ) -> Result<(), PicScaleError>;

    /// Resizes a gray+alpha `u16` image into `into`, optionally handling
    /// alpha premultiplication around the convolution.
    fn resize_gray_alpha16<'a>(
        &'a self,
        store: &ImageStore<'a, u16, 2>,
        into: &mut ImageStoreMut<'a, u16, 2>,
        premultiply_alpha: bool,
    ) -> Result<(), PicScaleError>;

    /// Resizes a three-channel (RGB) `u16` image into `into`.
    fn resize_rgb_u16<'a>(
        &'a self,
        store: &ImageStore<'a, u16, 3>,
        into: &mut ImageStoreMut<'a, u16, 3>,
    ) -> Result<(), PicScaleError>;

    /// Resizes a four-channel (RGBA) `u16` image into `into`, optionally
    /// handling alpha premultiplication around the convolution.
    fn resize_rgba_u16<'a>(
        &'a self,
        store: &ImageStore<'a, u16, 4>,
        into: &mut ImageStoreMut<'a, u16, 4>,
        premultiply_alpha: bool,
    ) -> Result<(), PicScaleError>;
}
439
impl Scaler {
    /// Creates a new scaler using `filter` as the resampling function.
    ///
    /// Threading defaults to [`ThreadingPolicy::Single`] and the workload
    /// strategy to its `Default` ([`WorkloadStrategy::PreferSpeed`]).
    pub fn new(filter: ResamplingFunction) -> Self {
        Scaler {
            function: filter,
            threading_policy: ThreadingPolicy::Single,
            workload_strategy: WorkloadStrategy::default(),
        }
    }

    /// Sets the quality/speed trade-off forwarded to the convolution passes.
    pub fn set_workload_strategy(&mut self, workload_strategy: WorkloadStrategy) {
        self.workload_strategy = workload_strategy;
    }

    /// Builds the 1-D convolution weights and per-destination-pixel bounds for
    /// resampling one axis from `in_size` to `out_size` samples.
    ///
    /// Two paths exist:
    /// - the generic kernel path, which evaluates the configured resampling
    ///   kernel (optionally shaped by a window function with blur/taper
    ///   parameters) around each destination center;
    /// - the "area" path, taken when the filter is area-like and `scale < 1`
    ///   (upscaling), which emits a simple 2-tap linear filter per pixel.
    ///
    /// Each destination pixel's weights are normalized to sum to one unless
    /// the raw sum is zero.
    pub(crate) fn generate_weights<T>(&self, in_size: usize, out_size: usize) -> FilterWeights<T>
    where
        T: Copy
            + Neg
            + Signed
            + Float
            + 'static
            + ConstPI
            + MulAssign<T>
            + AddAssign<T>
            + AsPrimitive<f64>
            + AsPrimitive<usize>
            + Jinc<T>
            + ConstSqrt2
            + Default
            + AsPrimitive<i32>
            + Trigonometry
            + Exponential
            + Sinc,
        f32: AsPrimitive<T>,
        f64: AsPrimitive<T>,
        i64: AsPrimitive<T>,
        i32: AsPrimitive<T>,
        usize: AsPrimitive<T>,
    {
        let resampling_filter = self.function.get_resampling_filter();
        // > 1 when shrinking, < 1 when enlarging.
        let scale = in_size.as_() / out_size.as_();
        let is_resizable_kernel = resampling_filter.is_resizable_kernel;
        // Resizable kernels grow with the downscale factor; fixed kernels stay at 1.
        let filter_scale_cutoff = match is_resizable_kernel {
            true => scale.max(1f32.as_()),
            false => 1f32.as_(),
        };
        let filter_base_size = resampling_filter.min_kernel_size;
        let resampling_function = resampling_filter.kernel;

        // Area filtering only applies on upscale for area-like filters.
        let is_area = resampling_filter.is_area && scale < 1.as_();

        let mut bounds: Vec<FilterBounds> = vec![FilterBounds::new(0, 0); out_size];

        if !is_area {
            let window_func = resampling_filter.window;
            // Kernel support in source samples, scaled by the downscale factor.
            let base_size: usize = (filter_base_size.as_() * filter_scale_cutoff).round().as_();
            let kernel_size = base_size;
            let filter_radius = base_size.as_() / 2.as_();
            let filter_scale = 1f32.as_() / filter_scale_cutoff;
            // Flat weights buffer: `kernel_size` taps per destination pixel.
            let mut weights: Vec<T> = vec![T::default(); kernel_size * out_size];
            let mut local_filters = vec![T::default(); kernel_size];
            let mut filter_position = 0usize;
            // Blur > 0 widens/narrows the window; 0 disables it via a zero scale.
            let blur_scale = match window_func {
                None => 1f32.as_(),
                Some(window) => {
                    if window.blur.as_() > 0f32.as_() {
                        1f32.as_() / window.blur.as_()
                    } else {
                        0f32.as_()
                    }
                }
            };
            for (i, bound) in bounds.iter_mut().enumerate() {
                // Sample position of destination pixel `i` in source space
                // (pixel-center convention, hence the +0.5).
                let center_x = ((i.as_() + 0.5.as_()) * scale).min(in_size.as_());
                let mut weights_sum: T = 0f32.as_();
                let mut local_filter_iteration = 0usize;

                // Clamp the tap range to the image and the kernel capacity.
                let start: usize = (center_x - filter_radius).floor().max(0f32.as_()).as_();
                let end: usize = (center_x + filter_radius)
                    .ceil()
                    .min(start.as_() + kernel_size.as_())
                    .min(in_size.as_())
                    .as_();

                let center = center_x - 0.5.as_();

                for (k, filter) in (start..end).zip(local_filters.iter_mut()) {
                    let dx = k.as_() - center;
                    let weight;
                    if let Some(resampling_window) = window_func {
                        let mut x = dx.abs();
                        x = if resampling_window.blur.as_() > 0f32.as_() {
                            x * blur_scale
                        } else {
                            x
                        };
                        // Taper: flatten the response near the center, then
                        // renormalize the remaining range to [0, 1].
                        x = if x <= resampling_window.taper.as_() {
                            0f32.as_()
                        } else {
                            (x - resampling_window.taper.as_())
                                / (1f32.as_() - resampling_window.taper.as_())
                        };
                        let window_producer = resampling_window.window;
                        let x_kernel_scaled = x * filter_scale;
                        let window = if x < resampling_window.window_size.as_() {
                            window_producer(x_kernel_scaled * resampling_window.window_size.as_())
                        } else {
                            0f32.as_()
                        };
                        weight = window * resampling_function(x_kernel_scaled);
                    } else {
                        let dx = dx.abs();
                        weight = resampling_function(dx * filter_scale);
                    }
                    weights_sum += weight;
                    *filter = weight;
                    local_filter_iteration += 1;
                }

                // EWA-style filters get an extra smoothing pass blending each
                // tap with its predecessor (alpha-weighted); the sum is rebuilt.
                let alpha: T = 0.7f32.as_();
                if resampling_filter.is_ewa && !local_filters.is_empty() {
                    weights_sum = local_filters[0];
                    for j in 1..local_filter_iteration {
                        let new_weight =
                            alpha * local_filters[j] + (1f32.as_() - alpha) * local_filters[j - 1];
                        local_filters[j] = new_weight;
                        weights_sum += new_weight;
                    }
                }

                let size = end - start;

                *bound = FilterBounds::new(start, size);

                // Normalize so the taps sum to 1 (skip degenerate all-zero rows).
                if weights_sum != 0f32.as_() {
                    let recpeq = 1f32.as_() / weights_sum;

                    for (dst, src) in weights
                        .iter_mut()
                        .skip(filter_position)
                        .take(size)
                        .zip(local_filters.iter().take(size))
                    {
                        *dst = *src * recpeq;
                    }
                }

                filter_position += kernel_size;
            }

            // NOTE(review): `kernel_size` is passed twice — presumably
            // (aligned size, logical size); confirm against FilterWeights::new.
            FilterWeights::<T>::new(
                weights,
                kernel_size,
                kernel_size,
                out_size,
                filter_radius.as_(),
                bounds,
            )
        } else {
            // Area path: a 2-tap linear filter whose split point follows the
            // fractional source position of each destination pixel.
            let inv_scale: T = 1.as_() / scale;
            let kernel_size = 2;
            let filter_radius: T = 1.as_();
            let mut weights: Vec<T> = vec![T::default(); kernel_size * out_size];
            let mut local_filters = vec![T::default(); kernel_size];
            let mut filter_position = 0usize;

            for (i, bound) in bounds.iter_mut().enumerate() {
                let mut weights_sum: T = 0f32.as_();

                let sx: T = (i.as_() * scale).floor();
                // Fractional coverage of the second source pixel.
                let fx = (i as i64 + 1).as_() - (sx + 1.as_()) * inv_scale;
                let dx = if fx <= 0.as_() {
                    0.as_()
                } else {
                    fx - fx.floor()
                };
                let dx = dx.abs();
                let weight0 = 1.as_() - dx;
                let weight1: T = dx;
                local_filters[0] = weight0;
                local_filters[1] = weight1;

                let start: usize = sx.floor().max(0f32.as_()).as_();
                let end: usize = (sx + kernel_size.as_())
                    .ceil()
                    .min(start.as_() + kernel_size.as_())
                    .min(in_size.as_())
                    .as_();

                let size = end - start;

                weights_sum += weight0;
                if size > 1 {
                    weights_sum += weight1;
                }
                *bound = FilterBounds::new(start, size);

                if weights_sum != 0f32.as_() {
                    let recpeq = 1f32.as_() / weights_sum;

                    for (dst, src) in weights
                        .iter_mut()
                        .skip(filter_position)
                        .take(size)
                        .zip(local_filters.iter().take(size))
                    {
                        *dst = *src * recpeq;
                    }
                } else {
                    // Degenerate row: fall back to a pass-through tap.
                    weights[filter_position] = 1.as_();
                }

                filter_position += kernel_size;
            }

            FilterWeights::new(
                weights,
                kernel_size,
                kernel_size,
                out_size,
                filter_radius.as_(),
                bounds,
            )
        }
    }
}
675
676impl Scaler {
677 pub(crate) fn generic_resize<
678 'a,
679 T: Clone + Copy + Debug + Send + Sync + Default + 'static,
680 const N: usize,
681 >(
682 &self,
683 store: &ImageStore<'a, T, N>,
684 into: &mut ImageStoreMut<'a, T, N>,
685 ) -> Result<(), PicScaleError>
686 where
687 ImageStore<'a, T, N>: VerticalConvolutionPass<T, N> + HorizontalConvolutionPass<T, N>,
688 ImageStoreMut<'a, T, N>: CheckStoreDensity,
689 {
690 let new_size = into.get_size();
691 into.validate()?;
692 store.validate()?;
693 if store.width == 0 || store.height == 0 || new_size.width == 0 || new_size.height == 0 {
694 return Err(PicScaleError::ZeroImageDimensions);
695 }
696
697 if check_image_size_overflow(store.width, store.height, store.channels) {
698 return Err(PicScaleError::SourceImageIsTooLarge);
699 }
700
701 if check_image_size_overflow(new_size.width, new_size.height, store.channels) {
702 return Err(PicScaleError::DestinationImageIsTooLarge);
703 }
704
705 if into.should_have_bit_depth() && !(1..=16).contains(&into.bit_depth) {
706 return Err(PicScaleError::UnsupportedBitDepth(into.bit_depth));
707 }
708
709 if store.width == new_size.width && store.height == new_size.height {
710 store.copied_to_mut(into);
711 return Ok(());
712 }
713
714 let nova_thread_pool = self
715 .threading_policy
716 .get_nova_pool(ImageSize::new(new_size.width, new_size.height));
717
718 if self.function == ResamplingFunction::Nearest {
719 resize_nearest::<T, N>(
720 store.buffer.as_ref(),
721 store.width,
722 store.height,
723 into.buffer.borrow_mut(),
724 new_size.width,
725 new_size.height,
726 &nova_thread_pool,
727 );
728 return Ok(());
729 }
730
731 let should_do_horizontal = store.width != new_size.width;
732 let should_do_vertical = store.height != new_size.height;
733 assert!(should_do_horizontal || should_do_vertical);
734
735 if should_do_vertical && should_do_horizontal {
736 let mut target_vertical = vec![T::default(); store.width * new_size.height * N];
737
738 let mut new_image_vertical = ImageStoreMut::<T, N>::from_slice(
739 &mut target_vertical,
740 store.width,
741 new_size.height,
742 )?;
743 new_image_vertical.bit_depth = into.bit_depth;
744 let vertical_filters = self.generate_weights(store.height, new_size.height);
745 let options = ConvolutionOptions::new(self.workload_strategy);
746 store.convolve_vertical(
747 vertical_filters,
748 &mut new_image_vertical,
749 &nova_thread_pool,
750 options,
751 );
752
753 let new_immutable_store = ImageStore::<T, N> {
754 buffer: std::borrow::Cow::Owned(target_vertical),
755 channels: N,
756 width: store.width,
757 height: new_size.height,
758 stride: store.width * N,
759 bit_depth: into.bit_depth,
760 };
761 let horizontal_filters = self.generate_weights(store.width, new_size.width);
762 let options = ConvolutionOptions::new(self.workload_strategy);
763 new_immutable_store.convolve_horizontal(
764 horizontal_filters,
765 into,
766 &nova_thread_pool,
767 options,
768 );
769 Ok(())
770 } else if should_do_vertical {
771 let vertical_filters = self.generate_weights(store.height, new_size.height);
772 let options = ConvolutionOptions::new(self.workload_strategy);
773 store.convolve_vertical(vertical_filters, into, &nova_thread_pool, options);
774 Ok(())
775 } else {
776 assert!(should_do_horizontal);
777 let horizontal_filters = self.generate_weights(store.width, new_size.width);
778 let options = ConvolutionOptions::new(self.workload_strategy);
779 store.convolve_horizontal(horizontal_filters, into, &nova_thread_pool, options);
780 Ok(())
781 }
782 }
783
784 fn forward_resize_with_alpha<
785 'a,
786 T: Clone + Copy + Debug + Send + Sync + Default + 'static,
787 const N: usize,
788 >(
789 &self,
790 store: &ImageStore<'a, T, N>,
791 into: &mut ImageStoreMut<'a, T, N>,
792 premultiply_alpha_requested: bool,
793 nova_thread_pool: &novtb::ThreadPool,
794 ) -> Result<(), PicScaleError>
795 where
796 ImageStore<'a, T, N>:
797 VerticalConvolutionPass<T, N> + HorizontalConvolutionPass<T, N> + AssociateAlpha<T, N>,
798 ImageStoreMut<'a, T, N>: CheckStoreDensity + UnassociateAlpha<T, N>,
799 {
800 let new_size = into.get_size();
801 let mut src_store: std::borrow::Cow<'_, ImageStore<'_, T, N>> =
802 std::borrow::Cow::Borrowed(store);
803
804 let mut has_alpha_premultiplied = true;
805
806 if premultiply_alpha_requested {
807 let is_alpha_premultiplication_reasonable =
808 src_store.is_alpha_premultiplication_needed();
809 if is_alpha_premultiplication_reasonable {
810 let mut target_premultiplied =
811 vec![T::default(); src_store.width * src_store.height * N];
812 let mut new_store = ImageStoreMut::<T, N>::from_slice(
813 &mut target_premultiplied,
814 src_store.width,
815 src_store.height,
816 )?;
817 new_store.bit_depth = into.bit_depth;
818 src_store.premultiply_alpha(&mut new_store, nova_thread_pool);
819 src_store = std::borrow::Cow::Owned(ImageStore::<T, N> {
820 buffer: std::borrow::Cow::Owned(target_premultiplied),
821 channels: N,
822 width: src_store.width,
823 height: src_store.height,
824 stride: src_store.width * N,
825 bit_depth: into.bit_depth,
826 });
827 has_alpha_premultiplied = true;
828 }
829 }
830
831 let mut target_vertical = vec![T::default(); src_store.width * new_size.height * N];
832
833 let mut new_image_vertical = ImageStoreMut::<T, N>::from_slice(
834 &mut target_vertical,
835 src_store.width,
836 new_size.height,
837 )?;
838 new_image_vertical.bit_depth = into.bit_depth;
839 let vertical_filters = self.generate_weights(src_store.height, new_size.height);
840 let options = ConvolutionOptions::new(self.workload_strategy);
841 src_store.convolve_vertical(
842 vertical_filters,
843 &mut new_image_vertical,
844 nova_thread_pool,
845 options,
846 );
847
848 let new_immutable_store = ImageStore::<T, N> {
849 buffer: std::borrow::Cow::Owned(target_vertical),
850 channels: N,
851 width: src_store.width,
852 height: new_size.height,
853 stride: src_store.width * N,
854 bit_depth: into.bit_depth,
855 };
856 let horizontal_filters = self.generate_weights(src_store.width, new_size.width);
857 let options = ConvolutionOptions::new(self.workload_strategy);
858 new_immutable_store.convolve_horizontal(
859 horizontal_filters,
860 into,
861 nova_thread_pool,
862 options,
863 );
864
865 if premultiply_alpha_requested && has_alpha_premultiplied {
866 into.unpremultiply_alpha(nova_thread_pool, self.workload_strategy);
867 }
868
869 Ok(())
870 }
871
872 fn forward_resize_vertical_with_alpha<
873 'a,
874 T: Clone + Copy + Debug + Send + Sync + Default + 'static,
875 const N: usize,
876 >(
877 &self,
878 store: &ImageStore<'a, T, N>,
879 into: &mut ImageStoreMut<'a, T, N>,
880 premultiply_alpha_requested: bool,
881 nova_thread_pool: &novtb::ThreadPool,
882 ) -> Result<(), PicScaleError>
883 where
884 ImageStore<'a, T, N>:
885 VerticalConvolutionPass<T, N> + HorizontalConvolutionPass<T, N> + AssociateAlpha<T, N>,
886 ImageStoreMut<'a, T, N>: CheckStoreDensity + UnassociateAlpha<T, N>,
887 {
888 let new_size = into.get_size();
889 let mut src_store = std::borrow::Cow::Borrowed(store);
890
891 let mut has_alpha_premultiplied = true;
892
893 if premultiply_alpha_requested {
894 let is_alpha_premultiplication_reasonable =
895 src_store.is_alpha_premultiplication_needed();
896 if is_alpha_premultiplication_reasonable {
897 let mut target_premultiplied =
898 vec![T::default(); src_store.width * src_store.height * N];
899 let mut new_store = ImageStoreMut::<T, N>::from_slice(
900 &mut target_premultiplied,
901 src_store.width,
902 src_store.height,
903 )?;
904 new_store.bit_depth = into.bit_depth;
905 src_store.premultiply_alpha(&mut new_store, nova_thread_pool);
906 src_store = std::borrow::Cow::Owned(ImageStore::<T, N> {
907 buffer: std::borrow::Cow::Owned(target_premultiplied),
908 channels: N,
909 width: src_store.width,
910 height: src_store.height,
911 stride: src_store.width * N,
912 bit_depth: into.bit_depth,
913 });
914 has_alpha_premultiplied = true;
915 }
916 }
917
918 let vertical_filters = self.generate_weights(src_store.height, new_size.height);
919 let options = ConvolutionOptions::new(self.workload_strategy);
920 src_store.convolve_vertical(vertical_filters, into, nova_thread_pool, options);
921
922 if premultiply_alpha_requested && has_alpha_premultiplied {
923 into.unpremultiply_alpha(nova_thread_pool, self.workload_strategy);
924 }
925
926 Ok(())
927 }
928
929 fn forward_resize_horizontal_with_alpha<
930 'a,
931 T: Clone + Copy + Debug + Send + Sync + Default + 'static,
932 const N: usize,
933 >(
934 &self,
935 store: &ImageStore<'a, T, N>,
936 into: &mut ImageStoreMut<'a, T, N>,
937 premultiply_alpha_requested: bool,
938 nova_thread_pool: &novtb::ThreadPool,
939 ) -> Result<(), PicScaleError>
940 where
941 ImageStore<'a, T, N>:
942 VerticalConvolutionPass<T, N> + HorizontalConvolutionPass<T, N> + AssociateAlpha<T, N>,
943 ImageStoreMut<'a, T, N>: CheckStoreDensity + UnassociateAlpha<T, N>,
944 {
945 let new_size = into.get_size();
946 let mut src_store = std::borrow::Cow::Borrowed(store);
947
948 let mut has_alpha_premultiplied = true;
949
950 if premultiply_alpha_requested {
951 let is_alpha_premultiplication_reasonable =
952 src_store.is_alpha_premultiplication_needed();
953 if is_alpha_premultiplication_reasonable {
954 let mut target_premultiplied =
955 vec![T::default(); src_store.width * src_store.height * N];
956 let mut new_store = ImageStoreMut::<T, N>::from_slice(
957 &mut target_premultiplied,
958 src_store.width,
959 src_store.height,
960 )?;
961 new_store.bit_depth = into.bit_depth;
962 src_store.premultiply_alpha(&mut new_store, nova_thread_pool);
963 src_store = std::borrow::Cow::Owned(ImageStore::<T, N> {
964 buffer: std::borrow::Cow::Owned(target_premultiplied),
965 channels: N,
966 width: src_store.width,
967 height: src_store.height,
968 stride: src_store.width * N,
969 bit_depth: into.bit_depth,
970 });
971 has_alpha_premultiplied = true;
972 }
973 }
974
975 let horizontal_filters = self.generate_weights(src_store.width, new_size.width);
976 let options = ConvolutionOptions::new(self.workload_strategy);
977 src_store.convolve_horizontal(horizontal_filters, into, nova_thread_pool, options);
978
979 if premultiply_alpha_requested && has_alpha_premultiplied {
980 into.unpremultiply_alpha(nova_thread_pool, self.workload_strategy);
981 }
982
983 Ok(())
984 }
985
986 pub(crate) fn generic_resize_with_alpha<
987 'a,
988 T: Clone + Copy + Debug + Send + Sync + Default + 'static,
989 const N: usize,
990 >(
991 &self,
992 store: &ImageStore<'a, T, N>,
993 into: &mut ImageStoreMut<'a, T, N>,
994 premultiply_alpha_requested: bool,
995 ) -> Result<(), PicScaleError>
996 where
997 ImageStore<'a, T, N>:
998 VerticalConvolutionPass<T, N> + HorizontalConvolutionPass<T, N> + AssociateAlpha<T, N>,
999 ImageStoreMut<'a, T, N>: CheckStoreDensity + UnassociateAlpha<T, N>,
1000 {
1001 let new_size = into.get_size();
1002 into.validate()?;
1003 store.validate()?;
1004 if store.width == 0 || store.height == 0 || new_size.width == 0 || new_size.height == 0 {
1005 return Err(PicScaleError::ZeroImageDimensions);
1006 }
1007
1008 if check_image_size_overflow(store.width, store.height, store.channels) {
1009 return Err(PicScaleError::SourceImageIsTooLarge);
1010 }
1011
1012 if check_image_size_overflow(new_size.width, new_size.height, store.channels) {
1013 return Err(PicScaleError::DestinationImageIsTooLarge);
1014 }
1015
1016 if into.should_have_bit_depth() && !(1..=16).contains(&into.bit_depth) {
1017 return Err(PicScaleError::UnsupportedBitDepth(into.bit_depth));
1018 }
1019
1020 if store.width == new_size.width && store.height == new_size.height {
1021 store.copied_to_mut(into);
1022 return Ok(());
1023 }
1024
1025 let nova_thread_pool = self
1026 .threading_policy
1027 .get_nova_pool(ImageSize::new(new_size.width, new_size.height));
1028
1029 if self.function == ResamplingFunction::Nearest {
1030 resize_nearest::<T, N>(
1031 store.buffer.as_ref(),
1032 store.width,
1033 store.height,
1034 into.buffer.borrow_mut(),
1035 new_size.width,
1036 new_size.height,
1037 &nova_thread_pool,
1038 );
1039 return Ok(());
1040 }
1041
1042 let should_do_horizontal = store.width != new_size.width;
1043 let should_do_vertical = store.height != new_size.height;
1044 assert!(should_do_horizontal || should_do_vertical);
1045
1046 if should_do_vertical && should_do_horizontal {
1047 self.forward_resize_with_alpha(
1048 store,
1049 into,
1050 premultiply_alpha_requested,
1051 &nova_thread_pool,
1052 )
1053 } else if should_do_vertical {
1054 self.forward_resize_vertical_with_alpha(
1055 store,
1056 into,
1057 premultiply_alpha_requested,
1058 &nova_thread_pool,
1059 )
1060 } else {
1061 assert!(should_do_horizontal);
1062 self.forward_resize_horizontal_with_alpha(
1063 store,
1064 into,
1065 premultiply_alpha_requested,
1066 &nova_thread_pool,
1067 )
1068 }
1069 }
1070}
1071
1072impl Scaling for Scaler {
1073 fn set_threading_policy(&mut self, threading_policy: ThreadingPolicy) {
1074 self.threading_policy = threading_policy;
1075 }
1076
1077 fn resize_plane<'a>(
1078 &'a self,
1079 store: &ImageStore<'a, u8, 1>,
1080 into: &mut ImageStoreMut<'a, u8, 1>,
1081 ) -> Result<(), PicScaleError> {
1082 self.generic_resize(store, into)
1083 }
1084
1085 fn resize_cbcr8<'a>(
1086 &'a self,
1087 store: &ImageStore<'a, u8, 2>,
1088 into: &mut ImageStoreMut<'a, u8, 2>,
1089 ) -> Result<(), PicScaleError> {
1090 self.generic_resize(store, into)
1091 }
1092
1093 fn resize_gray_alpha<'a>(
1094 &'a self,
1095 store: &ImageStore<'a, u8, 2>,
1096 into: &mut ImageStoreMut<'a, u8, 2>,
1097 premultiply_alpha: bool,
1098 ) -> Result<(), PicScaleError> {
1099 self.generic_resize_with_alpha(store, into, premultiply_alpha)
1100 }
1101
1102 fn resize_rgb<'a>(
1103 &'a self,
1104 store: &ImageStore<'a, u8, 3>,
1105 into: &mut ImageStoreMut<'a, u8, 3>,
1106 ) -> Result<(), PicScaleError> {
1107 self.generic_resize(store, into)
1108 }
1109
1110 fn resize_rgba<'a>(
1111 &'a self,
1112 store: &ImageStore<'a, u8, 4>,
1113 into: &mut ImageStoreMut<'a, u8, 4>,
1114 premultiply_alpha: bool,
1115 ) -> Result<(), PicScaleError> {
1116 self.generic_resize_with_alpha(store, into, premultiply_alpha)
1117 }
1118}
1119
1120impl ScalingF32 for Scaler {
1121 fn resize_plane_f32<'a>(
1122 &'a self,
1123 store: &ImageStore<'a, f32, 1>,
1124 into: &mut ImageStoreMut<'a, f32, 1>,
1125 ) -> Result<(), PicScaleError> {
1126 self.generic_resize(store, into)
1127 }
1128
1129 fn resize_cbcr_f32<'a>(
1130 &'a self,
1131 store: &ImageStore<'a, f32, 2>,
1132 into: &mut ImageStoreMut<'a, f32, 2>,
1133 ) -> Result<(), PicScaleError> {
1134 self.generic_resize(store, into)
1135 }
1136
1137 fn resize_gray_alpha_f32<'a>(
1138 &'a self,
1139 store: &ImageStore<'a, f32, 2>,
1140 into: &mut ImageStoreMut<'a, f32, 2>,
1141 premultiply_alpha: bool,
1142 ) -> Result<(), PicScaleError> {
1143 self.generic_resize_with_alpha(store, into, premultiply_alpha)
1144 }
1145
1146 fn resize_rgb_f32<'a>(
1147 &'a self,
1148 store: &ImageStore<'a, f32, 3>,
1149 into: &mut ImageStoreMut<'a, f32, 3>,
1150 ) -> Result<(), PicScaleError> {
1151 self.generic_resize(store, into)
1152 }
1153
1154 fn resize_rgba_f32<'a>(
1155 &'a self,
1156 store: &ImageStore<'a, f32, 4>,
1157 into: &mut ImageStoreMut<'a, f32, 4>,
1158 premultiply_alpha: bool,
1159 ) -> Result<(), PicScaleError> {
1160 self.generic_resize_with_alpha(store, into, premultiply_alpha)
1161 }
1162}
1163
1164impl ScalingU16 for Scaler {
1165 fn resize_rgb_u16<'a>(
1191 &'a self,
1192 store: &ImageStore<'a, u16, 3>,
1193 into: &mut ImageStoreMut<'a, u16, 3>,
1194 ) -> Result<(), PicScaleError> {
1195 self.generic_resize(store, into)
1196 }
1197
1198 fn resize_cbcr_u16<'a>(
1199 &'a self,
1200 store: &ImageStore<'a, u16, 2>,
1201 into: &mut ImageStoreMut<'a, u16, 2>,
1202 ) -> Result<(), PicScaleError> {
1203 self.generic_resize(store, into)
1204 }
1205
1206 fn resize_gray_alpha16<'a>(
1207 &'a self,
1208 store: &ImageStore<'a, u16, 2>,
1209 into: &mut ImageStoreMut<'a, u16, 2>,
1210 premultiply_alpha: bool,
1211 ) -> Result<(), PicScaleError> {
1212 self.generic_resize_with_alpha(store, into, premultiply_alpha)
1213 }
1214
1215 fn resize_rgba_u16<'a>(
1237 &'a self,
1238 store: &ImageStore<'a, u16, 4>,
1239 into: &mut ImageStoreMut<'a, u16, 4>,
1240 premultiply_alpha: bool,
1241 ) -> Result<(), PicScaleError> {
1242 self.generic_resize_with_alpha(store, into, premultiply_alpha)
1243 }
1244
1245 fn resize_plane_u16<'a>(
1269 &'a self,
1270 store: &ImageStore<'a, u16, 1>,
1271 into: &mut ImageStoreMut<'a, u16, 1>,
1272 ) -> Result<(), PicScaleError> {
1273 self.generic_resize(store, into)
1274 }
1275}
1276
1277impl Scaler {
1278 pub fn resize_ar30(
1288 &self,
1289 src_image: &ImageStore<u8, 4>,
1290 dst_image: &mut ImageStoreMut<u8, 4>,
1291 order: Ar30ByteOrder,
1292 ) -> Result<(), PicScaleError> {
1293 src_image.validate()?;
1294 dst_image.validate()?;
1295 let dst_size = dst_image.get_size();
1296 let dst_stride = dst_image.stride();
1297 match order {
1298 Ar30ByteOrder::Host => {
1299 resize_ar30_impl::<{ Rgb30::Ar30 as usize }, { Ar30ByteOrder::Host as usize }>(
1300 src_image.as_bytes(),
1301 src_image.stride,
1302 src_image.get_size(),
1303 dst_image.buffer.borrow_mut(),
1304 dst_stride,
1305 dst_size,
1306 self,
1307 )
1308 }
1309 Ar30ByteOrder::Network => {
1310 resize_ar30_impl::<{ Rgb30::Ar30 as usize }, { Ar30ByteOrder::Network as usize }>(
1311 src_image.as_bytes(),
1312 src_image.stride,
1313 src_image.get_size(),
1314 dst_image.buffer.borrow_mut(),
1315 dst_stride,
1316 dst_size,
1317 self,
1318 )
1319 }
1320 }
1321 }
1322
1323 pub fn resize_ra30(
1332 &self,
1333 src_image: &ImageStore<u8, 4>,
1334 dst_image: &mut ImageStoreMut<u8, 4>,
1335 order: Ar30ByteOrder,
1336 ) -> Result<(), PicScaleError> {
1337 src_image.validate()?;
1338 dst_image.validate()?;
1339 let dst_size = dst_image.get_size();
1340 let dst_stride = dst_image.stride();
1341 match order {
1342 Ar30ByteOrder::Host => {
1343 resize_ar30_impl::<{ Rgb30::Ra30 as usize }, { Ar30ByteOrder::Host as usize }>(
1344 src_image.as_bytes(),
1345 src_image.stride,
1346 src_image.get_size(),
1347 dst_image.buffer.borrow_mut(),
1348 dst_stride,
1349 dst_size,
1350 self,
1351 )
1352 }
1353 Ar30ByteOrder::Network => {
1354 resize_ar30_impl::<{ Rgb30::Ra30 as usize }, { Ar30ByteOrder::Network as usize }>(
1355 src_image.as_bytes(),
1356 src_image.stride,
1357 src_image.get_size(),
1358 dst_image.buffer.borrow_mut(),
1359 dst_stride,
1360 dst_size,
1361 self,
1362 )
1363 }
1364 }
1365 }
1366}
1367
/// One-shot configuration for [`ImageStoreScaling::scale`]: a `Scaler` is
/// built from these options for each call.
#[derive(Debug, Clone, Copy, Ord, PartialOrd, Eq, PartialEq, Default)]
pub struct ScalingOptions {
    /// Resampling kernel to use.
    pub resampling_function: ResamplingFunction,
    /// Whether alpha-bearing layouts should be premultiplied around the resize.
    pub premultiply_alpha: bool,
    /// Single-threaded or pooled execution.
    pub threading_policy: ThreadingPolicy,
}
1375
/// Convenience resize entry point implemented directly on image stores:
/// `self` is the source, `store` the destination.
pub trait ImageStoreScaling<'b, T, const N: usize>
where
    T: Clone + Copy + Debug,
{
    /// Resizes `self` into `store` using the supplied options.
    fn scale(
        &self,
        store: &mut ImageStoreMut<'b, T, N>,
        options: ScalingOptions,
    ) -> Result<(), PicScaleError>;
}
1387
/// Implements [`ImageStoreScaling`] for an alpha-bearing store type: the
/// resize is routed through `generic_resize_with_alpha` so alpha can be
/// (un)premultiplied around the convolution.
macro_rules! def_image_scaling_alpha {
    ($clazz: ident, $fx_type: ident, $cn: expr) => {
        impl<'b> ImageStoreScaling<'b, $fx_type, $cn> for $clazz<'b> {
            fn scale(
                &self,
                store: &mut ImageStoreMut<'b, $fx_type, $cn>,
                options: ScalingOptions,
            ) -> Result<(), PicScaleError> {
                // A fresh Scaler is configured per call from the options.
                let mut scaler = Scaler::new(options.resampling_function);
                scaler.set_threading_policy(options.threading_policy);
                scaler.generic_resize_with_alpha(self, store, options.premultiply_alpha)
            }
        }
    };
}
1403
/// Implements [`ImageStoreScaling`] for a store type without alpha handling:
/// the resize goes through the plain `generic_resize` pipeline and
/// `options.premultiply_alpha` is ignored.
macro_rules! def_image_scaling {
    ($clazz: ident, $fx_type: ident, $cn: expr) => {
        impl<'b> ImageStoreScaling<'b, $fx_type, $cn> for $clazz<'b> {
            fn scale(
                &self,
                store: &mut ImageStoreMut<'b, $fx_type, $cn>,
                options: ScalingOptions,
            ) -> Result<(), PicScaleError> {
                // A fresh Scaler is configured per call from the options.
                let mut scaler = Scaler::new(options.resampling_function);
                scaler.set_threading_policy(options.threading_policy);
                scaler.generic_resize(self, store)
            }
        }
    };
}
1419
// Wire `ImageStoreScaling::scale` for every concrete store type. The
// 4-channel (RGBA) stores use the alpha-aware variant; everything else the
// plain pipeline.
def_image_scaling_alpha!(Rgba8ImageStore, u8, 4);
def_image_scaling!(Rgb8ImageStore, u8, 3);
def_image_scaling!(CbCr8ImageStore, u8, 2);
def_image_scaling!(Planar8ImageStore, u8, 1);
def_image_scaling!(Planar16ImageStore, u16, 1);
def_image_scaling!(CbCr16ImageStore, u16, 2);
def_image_scaling!(Rgb16ImageStore, u16, 3);
def_image_scaling_alpha!(Rgba16ImageStore, u16, 4);
def_image_scaling!(PlanarF32ImageStore, f32, 1);
def_image_scaling!(CbCrF32ImageStore, f32, 2);
def_image_scaling!(RgbF32ImageStore, f32, 3);
def_image_scaling_alpha!(RgbaF32ImageStore, f32, 4);
1432
1433#[cfg(test)]
1434mod tests {
1435 use super::*;
1436
    /// Asserts every RGBA8 pixel in `$dst` is within `$max` of the constant
    /// test color (124, 41, 99, 77), reporting the failing coordinate.
    macro_rules! check_rgba8 {
        ($dst: expr, $image_width: expr, $max: expr) => {
            {
                for (y, row) in $dst.chunks_exact($image_width * 4).enumerate() {
                    for (i, dst) in row.chunks_exact(4).enumerate() {
                        let diff0 = (dst[0] as i32 - 124).abs();
                        let diff1 = (dst[1] as i32 - 41).abs();
                        let diff2 = (dst[2] as i32 - 99).abs();
                        let diff3 = (dst[3] as i32 - 77).abs();
                        assert!(
                            diff0 < $max,
                            "Diff for channel 0 is expected < {}, but it was {diff0}, at (y: {y}, x: {i})",
                            $max
                        );
                        assert!(
                            diff1 < $max,
                            "Diff for channel 1 is expected < {}, but it was {diff1}, at (y: {y}, x: {i})",
                            $max
                        );
                        assert!(
                            diff2 < $max,
                            "Diff for channel 2 is expected < {}, but it was {diff2}, at (y: {y}, x: {i})",
                            $max
                        );
                        assert!(
                            diff3 < $max,
                            "Diff for channel 3 is expected < {}, but it was {diff3}, at (y: {y}, x: {i})",
                            $max
                        );
                    }
                }
            }
        };
    }
1471
    /// Asserts every RGB16 pixel in `$dst` is within `$max` of the constant
    /// test color (124, 41, 99), reporting the failing coordinate.
    macro_rules! check_rgb16 {
        ($dst: expr, $image_width: expr, $max: expr) => {
            {
                for (y, row) in $dst.chunks_exact($image_width * 3).enumerate() {
                    for (i, dst) in row.chunks_exact(3).enumerate() {
                        let diff0 = (dst[0] as i32 - 124).abs();
                        let diff1 = (dst[1] as i32 - 41).abs();
                        let diff2 = (dst[2] as i32 - 99).abs();
                        assert!(
                            diff0 < $max,
                            "Diff for channel 0 is expected < {}, but it was {diff0}, at (y: {y}, x: {i})",
                            $max
                        );
                        assert!(
                            diff1 < $max,
                            "Diff for channel 1 is expected < {}, but it was {diff1}, at (y: {y}, x: {i})",
                            $max
                        );
                        assert!(
                            diff2 < $max,
                            "Diff for channel 2 is expected < {}, but it was {diff2}, at (y: {y}, x: {i})",
                            $max
                        );
                    }
                }
            }
        };
    }
1500
1501 #[test]
1502 fn check_rgba8_resizing_vertical() {
1503 let image_width = 255;
1504 let image_height = 512;
1505 const CN: usize = 4;
1506 let mut image = vec![0u8; image_height * image_width * CN];
1507 for dst in image.chunks_exact_mut(4) {
1508 dst[0] = 124;
1509 dst[1] = 41;
1510 dst[2] = 99;
1511 dst[3] = 77;
1512 }
1513 let mut scaler = Scaler::new(ResamplingFunction::Bilinear);
1514 scaler.set_threading_policy(ThreadingPolicy::Single);
1515 let src_store = ImageStore::from_slice(&image, image_width, image_height).unwrap();
1516 let mut target_store = ImageStoreMut::alloc(image_width, image_height / 2);
1517 scaler
1518 .resize_rgba(&src_store, &mut target_store, false)
1519 .unwrap();
1520 let target_data = target_store.buffer.borrow();
1521 check_rgba8!(target_data, image_width, 34);
1522 }
1523
1524 #[test]
1525 fn check_rgba8_resizing_both() {
1526 let image_width = 255;
1527 let image_height = 512;
1528 const CN: usize = 4;
1529 let mut image = vec![0u8; image_height * image_width * CN];
1530 for dst in image.chunks_exact_mut(4) {
1531 dst[0] = 124;
1532 dst[1] = 41;
1533 dst[2] = 99;
1534 dst[3] = 77;
1535 }
1536 image[3] = 78;
1537 let mut scaler = Scaler::new(ResamplingFunction::Bilinear);
1538 scaler.set_threading_policy(ThreadingPolicy::Single);
1539 let src_store = ImageStore::from_slice(&image, image_width, image_height).unwrap();
1540 let mut target_store = ImageStoreMut::alloc(image_width / 2, image_height / 2);
1541 scaler
1542 .resize_rgba(&src_store, &mut target_store, false)
1543 .unwrap();
1544 let target_data = target_store.buffer.borrow();
1545 check_rgba8!(target_data, image_width, 34);
1546 }
1547
1548 #[test]
1549 fn check_rgba8_resizing_alpha() {
1550 let image_width = 255;
1551 let image_height = 512;
1552 const CN: usize = 4;
1553 let mut image = vec![0u8; image_height * image_width * CN];
1554 for dst in image.chunks_exact_mut(4) {
1555 dst[0] = 124;
1556 dst[1] = 41;
1557 dst[2] = 99;
1558 dst[3] = 77;
1559 }
1560 image[3] = 78;
1561 let mut scaler = Scaler::new(ResamplingFunction::Lanczos3);
1562 scaler.set_threading_policy(ThreadingPolicy::Single);
1563 let src_store = ImageStore::from_slice(&image, image_width, image_height).unwrap();
1564 let mut target_store = ImageStoreMut::alloc(image_width / 2, image_height / 2);
1565 scaler
1566 .resize_rgba(&src_store, &mut target_store, true)
1567 .unwrap();
1568 let target_data = target_store.buffer.borrow();
1569 check_rgba8!(target_data, image_width, 126);
1570 }
1571
1572 #[test]
1573 fn check_rgb8_resizing_vertical() {
1574 let image_width = 255;
1575 let image_height = 512;
1576 const CN: usize = 3;
1577 let mut image = vec![0u8; image_height * image_width * CN];
1578 for dst in image.chunks_exact_mut(3) {
1579 dst[0] = 124;
1580 dst[1] = 41;
1581 dst[2] = 99;
1582 }
1583 let mut scaler = Scaler::new(ResamplingFunction::Bilinear);
1584 scaler.set_threading_policy(ThreadingPolicy::Single);
1585 let src_store = ImageStore::from_slice(&image, image_width, image_height).unwrap();
1586 let mut target_store = ImageStoreMut::alloc(image_width, image_height / 2);
1587 scaler.resize_rgb(&src_store, &mut target_store).unwrap();
1588 let target_data = target_store.buffer.borrow();
1589
1590 check_rgb16!(target_data, image_width, 85);
1591 }
1592
1593 #[test]
1594 fn check_rgb8_resizing_vertical_threading() {
1595 let image_width = 255;
1596 let image_height = 512;
1597 const CN: usize = 3;
1598 let mut image = vec![0u8; image_height * image_width * CN];
1599 for dst in image.chunks_exact_mut(3) {
1600 dst[0] = 124;
1601 dst[1] = 41;
1602 dst[2] = 99;
1603 }
1604 let mut scaler = Scaler::new(ResamplingFunction::Bilinear);
1605 scaler.set_threading_policy(ThreadingPolicy::Adaptive);
1606 let src_store = ImageStore::from_slice(&image, image_width, image_height).unwrap();
1607 let mut target_store = ImageStoreMut::alloc(image_width, image_height / 2);
1608 scaler.resize_rgb(&src_store, &mut target_store).unwrap();
1609 let target_data = target_store.buffer.borrow();
1610
1611 check_rgb16!(target_data, image_width, 85);
1612 }
1613
1614 #[test]
1615 fn check_rgba10_resizing_vertical() {
1616 let image_width = 8;
1617 let image_height = 8;
1618 const CN: usize = 4;
1619 let mut image = vec![0u16; image_height * image_width * CN];
1620 for dst in image.chunks_exact_mut(4) {
1621 dst[0] = 124;
1622 dst[1] = 41;
1623 dst[2] = 99;
1624 dst[3] = 77;
1625 }
1626 image[3] = 78;
1627 let mut scaler = Scaler::new(ResamplingFunction::Lanczos3);
1628 scaler.set_threading_policy(ThreadingPolicy::Single);
1629 let mut src_store = ImageStore::from_slice(&image, image_width, image_height).unwrap();
1630 src_store.bit_depth = 10;
1631 let mut target_store = ImageStoreMut::alloc_with_depth(image_width, image_height / 2, 10);
1632 scaler
1633 .resize_rgba_u16(&src_store, &mut target_store, false)
1634 .unwrap();
1635 let target_data = target_store.buffer.borrow();
1636
1637 check_rgba8!(target_data, image_width, 60);
1638 }
1639
1640 #[test]
1641 fn check_rgb10_resizing_vertical() {
1642 let image_width = 8;
1643 let image_height = 4;
1644 const CN: usize = 3;
1645 let mut image = vec![0; image_height * image_width * CN];
1646 for dst in image.chunks_exact_mut(3) {
1647 dst[0] = 124;
1648 dst[1] = 41;
1649 dst[2] = 99;
1650 }
1651 let mut scaler = Scaler::new(ResamplingFunction::Lanczos3);
1652 scaler.set_threading_policy(ThreadingPolicy::Single);
1653 let mut src_store = ImageStore::from_slice(&image, image_width, image_height).unwrap();
1654 src_store.bit_depth = 10;
1655 let mut target_store = ImageStoreMut::alloc_with_depth(image_width, image_height / 2, 10);
1656 scaler
1657 .resize_rgb_u16(&src_store, &mut target_store)
1658 .unwrap();
1659 let target_data = target_store.buffer.borrow();
1660
1661 check_rgb16!(target_data, image_width, 85);
1662 }
1663
1664 #[test]
1665 fn check_rgb10_resizing_vertical_adaptive() {
1666 let image_width = 8;
1667 let image_height = 4;
1668 const CN: usize = 3;
1669 let mut image = vec![0; image_height * image_width * CN];
1670 for dst in image.chunks_exact_mut(3) {
1671 dst[0] = 124;
1672 dst[1] = 41;
1673 dst[2] = 99;
1674 }
1675 let mut scaler = Scaler::new(ResamplingFunction::Lanczos3);
1676 scaler.set_threading_policy(ThreadingPolicy::Adaptive);
1677 let mut src_store = ImageStore::from_slice(&image, image_width, image_height).unwrap();
1678 src_store.bit_depth = 10;
1679 let mut target_store = ImageStoreMut::alloc_with_depth(image_width, image_height / 2, 10);
1680 scaler
1681 .resize_rgb_u16(&src_store, &mut target_store)
1682 .unwrap();
1683 let target_data = target_store.buffer.borrow();
1684
1685 check_rgb16!(target_data, image_width, 85);
1686 }
1687
1688 #[test]
1689 fn check_rgb16_resizing_vertical() {
1690 let image_width = 8;
1691 let image_height = 8;
1692 const CN: usize = 3;
1693 let mut image = vec![164; image_height * image_width * CN];
1694 for dst in image.chunks_exact_mut(3) {
1695 dst[0] = 124;
1696 dst[1] = 41;
1697 dst[2] = 99;
1698 }
1699 let mut scaler = Scaler::new(ResamplingFunction::Lanczos3);
1700 scaler.set_threading_policy(ThreadingPolicy::Single);
1701 let mut src_store = ImageStore::from_slice(&image, image_width, image_height).unwrap();
1702 src_store.bit_depth = 10;
1703 let mut target_store = ImageStoreMut::alloc_with_depth(image_width, image_height / 2, 16);
1704 scaler
1705 .resize_rgb_u16(&src_store, &mut target_store)
1706 .unwrap();
1707 let target_data = target_store.buffer.borrow();
1708
1709 check_rgb16!(target_data, image_width, 100);
1710 }
1711
1712 #[test]
1713 fn check_rgba16_resizing_vertical() {
1714 let image_width = 8;
1715 let image_height = 8;
1716 const CN: usize = 4;
1717 let mut image = vec![0u16; image_height * image_width * CN];
1718 for dst in image.chunks_exact_mut(4) {
1719 dst[0] = 124;
1720 dst[1] = 41;
1721 dst[2] = 99;
1722 dst[3] = 255;
1723 }
1724 let mut scaler = Scaler::new(ResamplingFunction::Lanczos3);
1725 scaler.set_threading_policy(ThreadingPolicy::Single);
1726 let mut src_store = ImageStore::from_slice(&image, image_width, image_height).unwrap();
1727 src_store.bit_depth = 10;
1728 let mut target_store = ImageStoreMut::alloc_with_depth(image_width, image_height / 2, 16);
1729 scaler
1730 .resize_rgba_u16(&src_store, &mut target_store, false)
1731 .unwrap();
1732 let target_data = target_store.buffer.borrow();
1733
1734 check_rgba8!(target_data, image_width, 180);
1735 }
1736
1737 #[test]
1738 fn check_rgba16_resizing_vertical_threading() {
1739 let image_width = 8;
1740 let image_height = 8;
1741 const CN: usize = 4;
1742 let mut image = vec![0u16; image_height * image_width * CN];
1743 for dst in image.chunks_exact_mut(4) {
1744 dst[0] = 124;
1745 dst[1] = 41;
1746 dst[2] = 99;
1747 dst[3] = 255;
1748 }
1749 let mut scaler = Scaler::new(ResamplingFunction::Lanczos3);
1750 scaler.set_threading_policy(ThreadingPolicy::Adaptive);
1751 let mut src_store = ImageStore::from_slice(&image, image_width, image_height).unwrap();
1752 src_store.bit_depth = 10;
1753 let mut target_store = ImageStoreMut::alloc_with_depth(image_width, image_height / 2, 16);
1754 scaler
1755 .resize_rgba_u16(&src_store, &mut target_store, false)
1756 .unwrap();
1757 let target_data = target_store.buffer.borrow();
1758
1759 check_rgba8!(target_data, image_width, 180);
1760 }
1761
1762 #[test]
1763 fn check_rgba8_nearest_vertical() {
1764 let image_width = 255;
1765 let image_height = 512;
1766 const CN: usize = 4;
1767 let mut image = vec![0u8; image_height * image_width * CN];
1768 for dst in image.chunks_exact_mut(4) {
1769 dst[0] = 124;
1770 dst[1] = 41;
1771 dst[2] = 99;
1772 dst[3] = 77;
1773 }
1774 let mut scaler = Scaler::new(ResamplingFunction::Nearest);
1775 scaler.set_threading_policy(ThreadingPolicy::Single);
1776 let src_store = ImageStore::from_slice(&image, image_width, image_height).unwrap();
1777 let mut target_store = ImageStoreMut::alloc(image_width, image_height / 2);
1778 scaler
1779 .resize_rgba(&src_store, &mut target_store, false)
1780 .unwrap();
1781 let target_data = target_store.buffer.borrow();
1782
1783 check_rgba8!(target_data, image_width, 80);
1784 }
1785
1786 #[test]
1787 fn check_rgba8_nearest_vertical_threading() {
1788 let image_width = 255;
1789 let image_height = 512;
1790 const CN: usize = 4;
1791 let mut image = vec![0u8; image_height * image_width * CN];
1792 for dst in image.chunks_exact_mut(4) {
1793 dst[0] = 124;
1794 dst[1] = 41;
1795 dst[2] = 99;
1796 dst[3] = 77;
1797 }
1798 let mut scaler = Scaler::new(ResamplingFunction::Nearest);
1799 scaler.set_threading_policy(ThreadingPolicy::Adaptive);
1800 let src_store = ImageStore::from_slice(&image, image_width, image_height).unwrap();
1801 let mut target_store = ImageStoreMut::alloc(image_width, image_height / 2);
1802 scaler
1803 .resize_rgba(&src_store, &mut target_store, false)
1804 .unwrap();
1805 let target_data = target_store.buffer.borrow();
1806
1807 check_rgba8!(target_data, image_width, 80);
1808 }
1809}