1use crate::alpha_check::has_non_constant_cap_alpha_rgba_f32;
30#[cfg(feature = "nightly_f16")]
31use crate::alpha_handle_f16::{premultiply_alpha_rgba_f16, unpremultiply_alpha_rgba_f16};
32use crate::alpha_handle_f32::{premultiply_alpha_rgba_f32, unpremultiply_alpha_rgba_f32};
33use crate::alpha_handle_u8::{premultiply_alpha_rgba, unpremultiply_alpha_rgba};
34use crate::alpha_handle_u16::{premultiply_alpha_rgba_u16, unpremultiply_alpha_rgba_u16};
35use crate::support::check_image_size_overflow;
36use crate::validation::{PicScaleBufferMismatch, PicScaleError, try_vec};
37use crate::{ImageSize, WorkloadStrategy};
38#[cfg(feature = "nightly_f16")]
39use core::f16;
40use std::fmt::Debug;
41
/// Read-only image store over interleaved pixel components.
///
/// `T` is the component type and `N` the number of interleaved channels per
/// pixel (e.g. 4 for RGBA). The buffer may be borrowed or owned (`Cow`).
#[derive(Clone)]
pub struct ImageStore<'a, T, const N: usize>
where
    [T]: ToOwned<Owned = Vec<T>>,
{
    /// Pixel data, borrowed or owned.
    pub buffer: std::borrow::Cow<'a, [T]>,
    /// Channel count; always equals `N`.
    pub channels: usize,
    /// Image width in pixels.
    pub width: usize,
    /// Image height in pixels.
    pub height: usize,
    /// Row stride in components; `0` means tightly packed (`width * N`),
    /// see `stride()`.
    pub stride: usize,
    /// Component bit depth; `0` when unset (required for `u16` stores).
    pub bit_depth: usize,
}
68
/// Mutable image store, used as the destination of scaling operations.
///
/// `T` is the component type and `N` the number of interleaved channels per
/// pixel. The buffer may be a mutable borrow or owned (see [`BufferStore`]).
pub struct ImageStoreMut<'a, T, const N: usize> {
    /// Pixel data, mutably borrowed or owned.
    pub buffer: BufferStore<'a, T>,
    /// Channel count; always equals `N`.
    pub channels: usize,
    /// Image width in pixels.
    pub width: usize,
    /// Image height in pixels.
    pub height: usize,
    /// Row stride in components; `0` means tightly packed (`width * N`),
    /// see `stride()`.
    pub stride: usize,
    /// Component bit depth; `0` when unset (required for `u16` stores).
    pub bit_depth: usize,
}
91
/// Internal probe telling whether a store of a given component type requires
/// an explicit, caller-supplied bit depth to interpret its values.
pub(crate) trait CheckStoreDensity {
    fn should_have_bit_depth(&self) -> bool;
}
95
/// Owned-or-borrowed mutable pixel storage backing [`ImageStoreMut`].
pub enum BufferStore<'a, T> {
    /// Mutably borrowed external buffer.
    Borrowed(&'a mut [T]),
    /// Buffer owned by the store itself.
    Owned(Vec<T>),
}
101
102impl<T> BufferStore<'_, T> {
103 #[allow(clippy::should_implement_trait)]
104 pub fn borrow(&self) -> &[T] {
106 match self {
107 Self::Borrowed(p_ref) => p_ref,
108 Self::Owned(vec) => vec,
109 }
110 }
111
112 #[allow(clippy::should_implement_trait)]
113 pub fn borrow_mut(&mut self) -> &mut [T] {
115 match self {
116 Self::Borrowed(p_ref) => p_ref,
117 Self::Owned(vec) => vec,
118 }
119 }
120}
121
122impl<'a, T, const N: usize> ImageStore<'a, T, N>
123where
124 T: Clone + Copy + Debug + Default,
125{
126 pub fn new(
128 slice_ref: Vec<T>,
129 width: usize,
130 height: usize,
131 ) -> Result<ImageStore<'a, T, N>, PicScaleError> {
132 let expected_size = width * height * N;
133 if slice_ref.len() != width * height * N {
134 return Err(PicScaleError::BufferMismatch(PicScaleBufferMismatch {
135 expected: expected_size,
136 width,
137 height,
138 channels: N,
139 slice_len: slice_ref.len(),
140 }));
141 }
142 Ok(ImageStore::<T, N> {
143 buffer: std::borrow::Cow::Owned(slice_ref),
144 channels: N,
145 width,
146 height,
147 stride: width * N,
148 bit_depth: 0,
149 })
150 }
151
152 pub fn borrow(
154 slice_ref: &'a [T],
155 width: usize,
156 height: usize,
157 ) -> Result<ImageStore<'a, T, N>, PicScaleError> {
158 let expected_size = width * height * N;
159 if slice_ref.len() != width * height * N {
160 return Err(PicScaleError::BufferMismatch(PicScaleBufferMismatch {
161 expected: expected_size,
162 width,
163 height,
164 channels: N,
165 slice_len: slice_ref.len(),
166 }));
167 }
168 Ok(ImageStore::<T, N> {
169 buffer: std::borrow::Cow::Borrowed(slice_ref),
170 channels: N,
171 width,
172 height,
173 stride: width * N,
174 bit_depth: 0,
175 })
176 }
177
178 pub fn alloc(width: usize, height: usize) -> ImageStore<'a, T, N> {
180 let vc = vec![T::default(); width * N * height];
181 ImageStore::<T, N> {
182 buffer: std::borrow::Cow::Owned(vc),
183 channels: N,
184 width,
185 height,
186 stride: width * N,
187 bit_depth: 0,
188 }
189 }
190}
191
impl<const N: usize> CheckStoreDensity for ImageStoreMut<'_, u8, N> {
    // `u8` stores always use the full 8-bit range; no explicit depth needed.
    fn should_have_bit_depth(&self) -> bool {
        false
    }
}
197
impl<const N: usize> CheckStoreDensity for ImageStoreMut<'_, f32, N> {
    // Float stores do not use the integer bit-depth field.
    fn should_have_bit_depth(&self) -> bool {
        false
    }
}
203
#[cfg(feature = "nightly_f16")]
impl<const N: usize> CheckStoreDensity for ImageStoreMut<'_, f16, N> {
    // Half-float stores, like `f32`, do not use the integer bit-depth field.
    fn should_have_bit_depth(&self) -> bool {
        false
    }
}
210
impl<const N: usize> CheckStoreDensity for ImageStoreMut<'_, u16, N> {
    // `u16` stores require a caller-supplied bit depth to interpret the
    // component range.
    fn should_have_bit_depth(&self) -> bool {
        true
    }
}
216
217impl<T, const N: usize> ImageStoreMut<'_, T, N> {
218 pub(crate) fn validate(&self) -> Result<(), PicScaleError> {
219 let expected_size = self.stride() * self.height;
220 if self.width == 0 || self.height == 0 {
221 return Err(PicScaleError::ZeroImageDimensions);
222 }
223 if self.buffer.borrow().len() != self.stride() * self.height {
224 return Err(PicScaleError::BufferMismatch(PicScaleBufferMismatch {
225 expected: expected_size,
226 width: self.width,
227 height: self.height,
228 channels: N,
229 slice_len: self.buffer.borrow().len(),
230 }));
231 }
232 if self.stride < self.width * N {
233 return Err(PicScaleError::InvalidStride(self.width * N, self.stride));
234 }
235 if check_image_size_overflow(self.width, self.height, self.channels) {
236 return Err(PicScaleError::SourceImageIsTooLarge);
237 }
238 Ok(())
239 }
240}
241
242impl<T, const N: usize> ImageStore<'_, T, N>
243where
244 [T]: ToOwned<Owned = Vec<T>>,
245{
246 pub(crate) fn validate(&self) -> Result<(), PicScaleError> {
247 let expected_size = self.stride() * self.height;
248 if self.width == 0 || self.height == 0 {
249 return Err(PicScaleError::ZeroImageDimensions);
250 }
251 if self.buffer.as_ref().len() != self.stride() * self.height {
252 return Err(PicScaleError::BufferMismatch(PicScaleBufferMismatch {
253 expected: expected_size,
254 width: self.width,
255 height: self.height,
256 channels: N,
257 slice_len: self.buffer.as_ref().len(),
258 }));
259 }
260 if self.stride < self.width * N {
261 return Err(PicScaleError::InvalidStride(self.width * N, self.stride));
262 }
263 if check_image_size_overflow(self.width, self.height, self.channels) {
264 return Err(PicScaleError::DestinationImageIsTooLarge);
265 }
266
267 Ok(())
268 }
269}
270
271impl<'a, T, const N: usize> ImageStoreMut<'a, T, N>
272where
273 T: Clone + Copy + Debug + Default,
274{
275 pub fn new(
279 slice_ref: Vec<T>,
280 width: usize,
281 height: usize,
282 ) -> Result<ImageStoreMut<'a, T, N>, PicScaleError> {
283 let expected_size = width * height * N;
284 if slice_ref.len() != width * height * N {
285 return Err(PicScaleError::BufferMismatch(PicScaleBufferMismatch {
286 expected: expected_size,
287 width,
288 height,
289 channels: N,
290 slice_len: slice_ref.len(),
291 }));
292 }
293 Ok(ImageStoreMut::<T, N> {
294 buffer: BufferStore::Owned(slice_ref),
295 channels: N,
296 width,
297 height,
298 stride: width * N,
299 bit_depth: 0,
300 })
301 }
302
303 pub fn borrow(
307 slice_ref: &'a mut [T],
308 width: usize,
309 height: usize,
310 ) -> Result<ImageStoreMut<'a, T, N>, PicScaleError> {
311 let expected_size = width * height * N;
312 if slice_ref.len() != width * height * N {
313 return Err(PicScaleError::BufferMismatch(PicScaleBufferMismatch {
314 expected: expected_size,
315 width,
316 height,
317 channels: N,
318 slice_len: slice_ref.len(),
319 }));
320 }
321 Ok(ImageStoreMut::<T, N> {
322 buffer: BufferStore::Borrowed(slice_ref),
323 channels: N,
324 width,
325 height,
326 stride: width * N,
327 bit_depth: 0,
328 })
329 }
330
331 pub fn alloc(width: usize, height: usize) -> ImageStoreMut<'a, T, N> {
335 let vc = vec![T::default(); width * N * height];
336 ImageStoreMut::<T, N> {
337 buffer: BufferStore::Owned(vc),
338 channels: N,
339 width,
340 height,
341 stride: width * N,
342 bit_depth: 0,
343 }
344 }
345
346 pub fn try_alloc(
350 width: usize,
351 height: usize,
352 ) -> Result<ImageStoreMut<'a, T, N>, PicScaleError> {
353 let vc = try_vec![T::default(); width * N * height];
354 Ok(ImageStoreMut::<T, N> {
355 buffer: BufferStore::Owned(vc),
356 channels: N,
357 width,
358 height,
359 stride: width * N,
360 bit_depth: 0,
361 })
362 }
363
364 pub fn alloc_with_depth(
366 width: usize,
367 height: usize,
368 bit_depth: usize,
369 ) -> ImageStoreMut<'a, T, N> {
370 let vc = vec![T::default(); width * N * height];
371 ImageStoreMut::<T, N> {
372 buffer: BufferStore::Owned(vc),
373 channels: N,
374 width,
375 height,
376 stride: width * N,
377 bit_depth,
378 }
379 }
380
381 pub fn try_alloc_with_depth(
383 width: usize,
384 height: usize,
385 bit_depth: usize,
386 ) -> Result<ImageStoreMut<'a, T, N>, PicScaleError> {
387 let vc = try_vec![T::default(); width * N * height];
388 Ok(ImageStoreMut::<T, N> {
389 buffer: BufferStore::Owned(vc),
390 channels: N,
391 width,
392 height,
393 stride: width * N,
394 bit_depth,
395 })
396 }
397}
398
399impl<T, const N: usize> ImageStoreMut<'_, T, N> {
400 #[inline]
404 pub fn stride(&self) -> usize {
405 if self.stride == 0 {
406 return self.width * N;
407 }
408 self.stride
409 }
410}
411
412impl<T, const N: usize> ImageStore<'_, T, N>
413where
414 [T]: ToOwned<Owned = Vec<T>>,
415{
416 #[inline]
420 pub fn stride(&self) -> usize {
421 if self.stride == 0 {
422 return self.width * N;
423 }
424 self.stride
425 }
426}
427
428impl<'a, T, const N: usize> ImageStore<'a, T, N>
429where
430 T: Clone + Copy + Debug,
431{
432 pub fn size(&self) -> ImageSize {
434 ImageSize::new(self.width, self.height)
435 }
436
437 pub fn as_bytes(&self) -> &[T] {
439 match &self.buffer {
440 std::borrow::Cow::Borrowed(br) => br,
441 std::borrow::Cow::Owned(v) => v.as_ref(),
442 }
443 }
444
445 pub fn from_slice(
447 slice_ref: &'a [T],
448 width: usize,
449 height: usize,
450 ) -> Result<ImageStore<'a, T, N>, PicScaleError> {
451 let expected_size = width * height * N;
452 if slice_ref.len() != width * height * N {
453 return Err(PicScaleError::BufferMismatch(PicScaleBufferMismatch {
454 expected: expected_size,
455 width,
456 height,
457 channels: N,
458 slice_len: slice_ref.len(),
459 }));
460 }
461 Ok(ImageStore::<T, N> {
462 buffer: std::borrow::Cow::Borrowed(slice_ref),
463 channels: N,
464 width,
465 height,
466 stride: width * N,
467 bit_depth: 0,
468 })
469 }
470
471 pub fn copied<'b>(&self) -> ImageStore<'b, T, N> {
473 ImageStore::<T, N> {
474 buffer: std::borrow::Cow::Owned(self.buffer.as_ref().to_vec()),
475 channels: N,
476 width: self.width,
477 height: self.height,
478 stride: self.width * N,
479 bit_depth: self.bit_depth,
480 }
481 }
482
483 pub fn copied_to_mut(&self, into: &mut ImageStoreMut<T, N>) {
485 let into_stride = into.stride();
486 for (src_row, dst_row) in self
487 .buffer
488 .as_ref()
489 .chunks_exact(self.stride())
490 .zip(into.buffer.borrow_mut().chunks_exact_mut(into_stride))
491 {
492 for (&src, dst) in src_row.iter().zip(dst_row.iter_mut()) {
493 *dst = src;
494 }
495 }
496 }
497}
498
499impl<'a, T, const N: usize> ImageStoreMut<'a, T, N> {
500 pub fn size(&self) -> ImageSize {
502 ImageSize::new(self.width, self.height)
503 }
504
505 pub fn as_bytes(&self) -> &[T] {
507 match &self.buffer {
508 BufferStore::Borrowed(p) => p,
509 BufferStore::Owned(v) => v,
510 }
511 }
512
513 pub fn from_slice(
515 slice_ref: &'a mut [T],
516 width: usize,
517 height: usize,
518 ) -> Result<ImageStoreMut<'a, T, N>, PicScaleError> {
519 let expected_size = width * height * N;
520 if slice_ref.len() != width * height * N {
521 return Err(PicScaleError::BufferMismatch(PicScaleBufferMismatch {
522 expected: expected_size,
523 width,
524 height,
525 channels: N,
526 slice_len: slice_ref.len(),
527 }));
528 }
529 Ok(ImageStoreMut::<T, N> {
530 buffer: BufferStore::Borrowed(slice_ref),
531 channels: N,
532 width,
533 height,
534 stride: width * N,
535 bit_depth: 0,
536 })
537 }
538}
539
540impl<'a, T, const N: usize> ImageStoreMut<'a, T, N>
541where
542 T: Clone,
543{
544 pub fn copied<'b>(&self) -> ImageStoreMut<'b, T, N> {
546 ImageStoreMut::<T, N> {
547 buffer: BufferStore::Owned(self.buffer.borrow().to_vec()),
548 channels: N,
549 width: self.width,
550 height: self.height,
551 stride: self.width * N,
552 bit_depth: self.bit_depth,
553 }
554 }
555
556 pub fn to_immutable(&self) -> ImageStore<'_, T, N> {
558 ImageStore::<T, N> {
559 buffer: std::borrow::Cow::Borrowed(self.buffer.borrow()),
560 channels: N,
561 width: self.width,
562 height: self.height,
563 stride: self.width * N,
564 bit_depth: self.bit_depth,
565 }
566 }
567}
568
/// Converts straight-alpha pixel data into premultiplied alpha.
pub(crate) trait AssociateAlpha<T: Clone + Copy + Debug, const N: usize> {
    /// Writes a premultiplied copy of `self` into `into`, parallelized over
    /// `pool`.
    fn premultiply_alpha(&self, into: &mut ImageStoreMut<'_, T, N>, pool: &novtb::ThreadPool);
    /// Returns `true` when the alpha channel is not constant at its maximum,
    /// i.e. premultiplication would actually change pixel values.
    fn is_alpha_premultiplication_needed(&self) -> bool;
}
573
/// Converts premultiplied-alpha pixel data back to straight alpha, in place.
pub(crate) trait UnassociateAlpha<T: Clone + Copy + Debug, const N: usize> {
    /// Un-premultiplies the buffer in place, parallelized over `pool`.
    /// `workload_strategy` selects the execution trade-off (semantics defined
    /// by [`WorkloadStrategy`]); some implementations ignore it.
    fn unpremultiply_alpha(
        &mut self,
        pool: &novtb::ThreadPool,
        workload_strategy: WorkloadStrategy,
    );
}
581
impl AssociateAlpha<u8, 2> for ImageStore<'_, u8, 2> {
    /// Premultiplies this gray+alpha 8-bit source into `into`.
    fn premultiply_alpha(&self, into: &mut ImageStoreMut<'_, u8, 2>, pool: &novtb::ThreadPool) {
        // Read the stride before taking the mutable buffer borrow.
        let dst_stride = into.stride();
        let dst = into.buffer.borrow_mut();
        let src = self.buffer.as_ref();
        use crate::alpha_handle_u8::premultiply_alpha_gray_alpha;
        premultiply_alpha_gray_alpha(
            dst,
            dst_stride,
            src,
            self.width,
            self.height,
            self.stride(),
            pool,
        );
    }

    /// `true` when any alpha sample differs from the constant maximum.
    fn is_alpha_premultiplication_needed(&self) -> bool {
        use crate::alpha_check::has_non_constant_cap_alpha_gray_alpha8;
        has_non_constant_cap_alpha_gray_alpha8(self.buffer.as_ref(), self.width, self.stride())
    }
}
604
impl AssociateAlpha<u16, 2> for ImageStore<'_, u16, 2> {
    /// Premultiplies this gray+alpha 16-bit source into `into`.
    fn premultiply_alpha(&self, into: &mut ImageStoreMut<'_, u16, 2>, pool: &novtb::ThreadPool) {
        // Read the stride before taking the mutable buffer borrow.
        let dst_stride = into.stride();
        let dst = into.buffer.borrow_mut();
        let src = self.buffer.as_ref();
        use crate::alpha_handle_u16::premultiply_alpha_gray_alpha_u16;
        premultiply_alpha_gray_alpha_u16(
            dst,
            dst_stride,
            src,
            self.width,
            self.height,
            self.stride(),
            // The destination's bit depth defines the alpha maximum.
            into.bit_depth,
            pool,
        );
    }

    /// `true` when any alpha sample differs from the constant maximum.
    fn is_alpha_premultiplication_needed(&self) -> bool {
        use crate::alpha_check::has_non_constant_cap_alpha_gray_alpha16;
        has_non_constant_cap_alpha_gray_alpha16(self.buffer.as_ref(), self.width, self.stride())
    }
}
628
impl AssociateAlpha<f32, 2> for ImageStore<'_, f32, 2> {
    /// Premultiplies this gray+alpha `f32` source into `into`.
    fn premultiply_alpha(&self, into: &mut ImageStoreMut<'_, f32, 2>, pool: &novtb::ThreadPool) {
        // Read the stride before taking the mutable buffer borrow.
        let dst_stride = into.stride();
        let dst = into.buffer.borrow_mut();
        let src = self.buffer.as_ref();
        use crate::alpha_handle_f32::premultiply_alpha_gray_alpha_f32;
        // NOTE(review): argument order here is (dst, dst_stride, src,
        // src_stride, width, height), unlike the u8/u16 gray-alpha helpers
        // which take (…, width, height, src_stride) — confirm against the
        // helper's signature.
        premultiply_alpha_gray_alpha_f32(
            dst,
            dst_stride,
            src,
            self.stride(),
            self.width,
            self.height,
            pool,
        );
    }

    /// `true` when any alpha sample differs from the constant maximum.
    fn is_alpha_premultiplication_needed(&self) -> bool {
        use crate::alpha_check::has_non_constant_cap_alpha_gray_alpha_f32;
        has_non_constant_cap_alpha_gray_alpha_f32(self.buffer.as_ref(), self.width, self.stride())
    }
}
651
impl AssociateAlpha<u8, 4> for ImageStore<'_, u8, 4> {
    /// Premultiplies this RGBA8 source into `into`.
    fn premultiply_alpha(&self, into: &mut ImageStoreMut<'_, u8, 4>, pool: &novtb::ThreadPool) {
        // Read the stride before taking the mutable buffer borrow.
        let dst_stride = into.stride();
        let dst = into.buffer.borrow_mut();
        let src = self.buffer.as_ref();
        premultiply_alpha_rgba(
            dst,
            dst_stride,
            src,
            self.width,
            self.height,
            self.stride(),
            pool,
        );
    }

    // Portable fallback: scalar alpha scan when no SIMD path is compiled in.
    #[cfg(not(any(
        any(target_arch = "x86_64", target_arch = "x86"),
        all(target_arch = "aarch64", feature = "neon")
    )))]
    fn is_alpha_premultiplication_needed(&self) -> bool {
        use crate::alpha_check::has_non_constant_cap_alpha_rgba8;
        has_non_constant_cap_alpha_rgba8(self.buffer.as_ref(), self.width, self.stride())
    }

    // aarch64 + neon: NEON-accelerated alpha scan.
    #[cfg(all(target_arch = "aarch64", feature = "neon"))]
    fn is_alpha_premultiplication_needed(&self) -> bool {
        use crate::neon::neon_has_non_constant_cap_alpha_rgba8;
        neon_has_non_constant_cap_alpha_rgba8(self.buffer.as_ref(), self.width, self.stride())
    }

    // x86/x86_64: runtime-dispatch to the widest available SIMD tier
    // (AVX-512BW > AVX2 > SSE4.1), falling back to the scalar scan.
    #[cfg(any(target_arch = "x86_64", target_arch = "x86"))]
    fn is_alpha_premultiplication_needed(&self) -> bool {
        use crate::alpha_check::has_non_constant_cap_alpha_rgba8;
        #[cfg(feature = "sse")]
        use crate::sse::sse_has_non_constant_cap_alpha_rgba8;
        #[cfg(all(target_arch = "x86_64", feature = "avx512"))]
        if std::arch::is_x86_feature_detected!("avx512bw") {
            use crate::avx512::avx512_has_non_constant_cap_alpha_rgba8;
            return avx512_has_non_constant_cap_alpha_rgba8(
                self.buffer.as_ref(),
                self.width,
                self.stride(),
            );
        }
        #[cfg(all(target_arch = "x86_64", feature = "avx"))]
        if std::arch::is_x86_feature_detected!("avx2") {
            use crate::avx2::avx_has_non_constant_cap_alpha_rgba8;
            return avx_has_non_constant_cap_alpha_rgba8(
                self.buffer.as_ref(),
                self.width,
                self.stride(),
            );
        }
        #[cfg(feature = "sse")]
        if std::arch::is_x86_feature_detected!("sse4.1") {
            return sse_has_non_constant_cap_alpha_rgba8(
                self.buffer.as_ref(),
                self.width,
                self.stride(),
            );
        }
        has_non_constant_cap_alpha_rgba8(self.buffer.as_ref(), self.width, self.stride())
    }
}
717
impl UnassociateAlpha<u8, 4> for ImageStoreMut<'_, u8, 4> {
    /// Un-premultiplies this RGBA8 buffer in place.
    fn unpremultiply_alpha(
        &mut self,
        pool: &novtb::ThreadPool,
        workload_strategy: WorkloadStrategy,
    ) {
        // Read the stride before taking the mutable buffer borrow.
        let src_stride = self.stride();
        let dst = self.buffer.borrow_mut();
        unpremultiply_alpha_rgba(
            dst,
            self.width,
            self.height,
            src_stride,
            pool,
            workload_strategy,
        );
    }
}
736
impl UnassociateAlpha<u8, 2> for ImageStoreMut<'_, u8, 2> {
    /// Un-premultiplies this gray+alpha 8-bit buffer in place.
    fn unpremultiply_alpha(
        &mut self,
        pool: &novtb::ThreadPool,
        workload_strategy: WorkloadStrategy,
    ) {
        // Read the stride before taking the mutable buffer borrow.
        let src_stride = self.stride();
        let dst = self.buffer.borrow_mut();
        use crate::alpha_handle_u8::unpremultiply_alpha_gray_alpha;
        unpremultiply_alpha_gray_alpha(
            dst,
            self.width,
            self.height,
            src_stride,
            pool,
            workload_strategy,
        );
    }
}
756
impl UnassociateAlpha<f32, 2> for ImageStoreMut<'_, f32, 2> {
    /// Un-premultiplies this gray+alpha `f32` buffer in place.
    /// The workload strategy is ignored for the float path.
    fn unpremultiply_alpha(&mut self, pool: &novtb::ThreadPool, _: WorkloadStrategy) {
        let src_stride = self.stride();
        let dst = self.buffer.borrow_mut();
        use crate::alpha_handle_f32::unpremultiply_alpha_gray_alpha_f32;
        unpremultiply_alpha_gray_alpha_f32(dst, src_stride, self.width, self.height, pool);
    }
}
765
impl UnassociateAlpha<u16, 2> for ImageStoreMut<'_, u16, 2> {
    /// Un-premultiplies this gray+alpha 16-bit buffer in place.
    /// The workload strategy is ignored for the u16 path.
    fn unpremultiply_alpha(&mut self, pool: &novtb::ThreadPool, _: WorkloadStrategy) {
        let src_stride = self.stride();
        let dst = self.buffer.borrow_mut();
        use crate::alpha_handle_u16::unpremultiply_alpha_gray_alpha_u16;
        unpremultiply_alpha_gray_alpha_u16(
            dst,
            src_stride,
            self.width,
            self.height,
            // Bit depth defines the alpha maximum for u16 data.
            self.bit_depth,
            pool,
        );
    }
}
781
impl AssociateAlpha<u16, 4> for ImageStore<'_, u16, 4> {
    /// Premultiplies this RGBA16 source into `into`.
    fn premultiply_alpha(&self, into: &mut ImageStoreMut<'_, u16, 4>, pool: &novtb::ThreadPool) {
        // Read the stride before taking the mutable buffer borrow.
        let dst_stride = into.stride();
        let dst = into.buffer.borrow_mut();
        let src = self.buffer.as_ref();
        premultiply_alpha_rgba_u16(
            dst,
            dst_stride,
            src,
            self.width,
            self.height,
            self.stride(),
            // The destination's bit depth defines the alpha maximum.
            into.bit_depth,
            pool,
        );
    }

    // Portable fallback: scalar alpha scan when no SIMD path is compiled in.
    #[cfg(not(any(
        any(target_arch = "x86_64", target_arch = "x86"),
        all(target_arch = "aarch64", feature = "neon")
    )))]
    fn is_alpha_premultiplication_needed(&self) -> bool {
        use crate::alpha_check::has_non_constant_cap_alpha_rgba16;
        has_non_constant_cap_alpha_rgba16(self.buffer.as_ref(), self.width, self.stride())
    }

    // aarch64 + neon: NEON-accelerated alpha scan.
    #[cfg(all(target_arch = "aarch64", feature = "neon"))]
    fn is_alpha_premultiplication_needed(&self) -> bool {
        use crate::neon::neon_has_non_constant_cap_alpha_rgba16;
        neon_has_non_constant_cap_alpha_rgba16(self.buffer.as_ref(), self.width, self.stride())
    }

    // x86/x86_64: runtime-dispatch AVX2 > SSE4.1, falling back to the
    // scalar scan (no AVX-512 variant for u16 here).
    #[cfg(any(target_arch = "x86_64", target_arch = "x86"))]
    fn is_alpha_premultiplication_needed(&self) -> bool {
        use crate::alpha_check::has_non_constant_cap_alpha_rgba16;
        #[cfg(feature = "sse")]
        use crate::sse::sse_has_non_constant_cap_alpha_rgba16;
        #[cfg(all(target_arch = "x86_64", feature = "avx"))]
        if std::arch::is_x86_feature_detected!("avx2") {
            use crate::avx2::avx_has_non_constant_cap_alpha_rgba16;
            return avx_has_non_constant_cap_alpha_rgba16(
                self.buffer.as_ref(),
                self.width,
                self.stride(),
            );
        }
        #[cfg(feature = "sse")]
        if std::arch::is_x86_feature_detected!("sse4.1") {
            return sse_has_non_constant_cap_alpha_rgba16(
                self.buffer.as_ref(),
                self.width,
                self.stride(),
            );
        }
        has_non_constant_cap_alpha_rgba16(self.buffer.as_ref(), self.width, self.stride())
    }
}
839
impl AssociateAlpha<f32, 4> for ImageStore<'_, f32, 4> {
    /// Premultiplies this RGBA `f32` source into `into`.
    fn premultiply_alpha(&self, into: &mut ImageStoreMut<'_, f32, 4>, pool: &novtb::ThreadPool) {
        // Read both strides before taking the mutable buffer borrow.
        let src_stride = self.stride();
        let dst_stride = into.stride();
        let dst = into.buffer.borrow_mut();
        let src = self.buffer.as_ref();
        premultiply_alpha_rgba_f32(
            dst,
            dst_stride,
            src,
            src_stride,
            self.width,
            self.height,
            pool,
        );
    }

    /// `true` when any alpha sample differs from the constant maximum.
    fn is_alpha_premultiplication_needed(&self) -> bool {
        has_non_constant_cap_alpha_rgba_f32(self.buffer.as_ref(), self.width, self.stride())
    }
}
861
#[cfg(feature = "nightly_f16")]
impl AssociateAlpha<f16, 4> for ImageStore<'_, f16, 4> {
    /// Premultiplies this RGBA `f16` source into `into`.
    fn premultiply_alpha(&self, into: &mut ImageStoreMut<'_, f16, 4>, pool: &novtb::ThreadPool) {
        // Read both strides before taking the mutable buffer borrow.
        let src_stride = self.stride();
        let dst_stride = into.stride();
        let dst = into.buffer.borrow_mut();
        let src = self.buffer.as_ref();
        premultiply_alpha_rgba_f16(
            dst,
            dst_stride,
            src,
            src_stride,
            self.width,
            self.height,
            pool,
        );
    }

    /// No constant-alpha fast check exists for `f16`, so this conservatively
    /// always requests premultiplication.
    fn is_alpha_premultiplication_needed(&self) -> bool {
        true
    }
}
884
impl UnassociateAlpha<u16, 4> for ImageStoreMut<'_, u16, 4> {
    /// Un-premultiplies this RGBA16 buffer in place.
    /// The workload strategy is ignored for the u16 path.
    fn unpremultiply_alpha(&mut self, pool: &novtb::ThreadPool, _: WorkloadStrategy) {
        let src_stride = self.stride();
        let in_place = self.buffer.borrow_mut();
        unpremultiply_alpha_rgba_u16(
            in_place,
            src_stride,
            self.width,
            self.height,
            // Bit depth defines the alpha maximum for u16 data.
            self.bit_depth,
            pool,
        );
    }
}
899
impl UnassociateAlpha<f32, 4> for ImageStoreMut<'_, f32, 4> {
    /// Un-premultiplies this RGBA `f32` buffer in place.
    /// The workload strategy is ignored for the float path.
    fn unpremultiply_alpha(&mut self, pool: &novtb::ThreadPool, _: WorkloadStrategy) {
        let stride = self.stride();
        let dst = self.buffer.borrow_mut();
        unpremultiply_alpha_rgba_f32(dst, stride, self.width, self.height, pool);
    }
}
907
#[cfg(feature = "nightly_f16")]
impl UnassociateAlpha<f16, 4> for ImageStoreMut<'_, f16, 4> {
    /// Un-premultiplies this RGBA `f16` buffer in place.
    /// The workload strategy is ignored for the float path.
    fn unpremultiply_alpha(&mut self, pool: &novtb::ThreadPool, _: WorkloadStrategy) {
        let stride = self.stride();
        let dst = self.buffer.borrow_mut();
        unpremultiply_alpha_rgba_f16(dst, stride, self.width, self.height, pool);
    }
}
916
// --- 8-bit (`u8`) store aliases ---
pub type Planar8ImageStore<'a> = ImageStore<'a, u8, 1>;
pub type Planar8ImageStoreMut<'a> = ImageStoreMut<'a, u8, 1>;
pub type CbCr8ImageStore<'a> = ImageStore<'a, u8, 2>;
pub type CbCr8ImageStoreMut<'a> = ImageStoreMut<'a, u8, 2>;
pub type GrayAlpha8ImageStore<'a> = ImageStore<'a, u8, 2>;
pub type GrayAlpha8ImageStoreMut<'a> = ImageStoreMut<'a, u8, 2>;
pub type Rgba8ImageStore<'a> = ImageStore<'a, u8, 4>;
pub type Rgba8ImageStoreMut<'a> = ImageStoreMut<'a, u8, 4>;
pub type Rgb8ImageStore<'a> = ImageStore<'a, u8, 3>;
pub type Rgb8ImageStoreMut<'a> = ImageStoreMut<'a, u8, 3>;

// --- 16-bit (`u16`) store aliases; remember to set `bit_depth` ---
pub type Planar16ImageStore<'a> = ImageStore<'a, u16, 1>;
pub type Planar16ImageStoreMut<'a> = ImageStoreMut<'a, u16, 1>;
pub type CbCr16ImageStore<'a> = ImageStore<'a, u16, 2>;
pub type CbCr16ImageStoreMut<'a> = ImageStoreMut<'a, u16, 2>;
pub type GrayAlpha16ImageStore<'a> = ImageStore<'a, u16, 2>;
pub type GrayAlpha16ImageStoreMut<'a> = ImageStoreMut<'a, u16, 2>;
pub type Rgba16ImageStore<'a> = ImageStore<'a, u16, 4>;
pub type Rgba16ImageStoreMut<'a> = ImageStoreMut<'a, u16, 4>;
pub type Rgb16ImageStore<'a> = ImageStore<'a, u16, 3>;
pub type Rgb16ImageStoreMut<'a> = ImageStoreMut<'a, u16, 3>;

// --- Half-float (`f16`) store aliases, nightly only ---
#[cfg(feature = "nightly_f16")]
pub type PlanarF16ImageStore<'a> = ImageStore<'a, f16, 1>;
#[cfg(feature = "nightly_f16")]
pub type PlanarF16ImageStoreMut<'a> = ImageStoreMut<'a, f16, 1>;
#[cfg(feature = "nightly_f16")]
pub type CbCrF16ImageStore<'a> = ImageStore<'a, f16, 2>;
#[cfg(feature = "nightly_f16")]
pub type CbCrF16ImageStoreMut<'a> = ImageStoreMut<'a, f16, 2>;
#[cfg(feature = "nightly_f16")]
pub type RgbaF16ImageStore<'a> = ImageStore<'a, f16, 4>;
#[cfg(feature = "nightly_f16")]
pub type RgbaF16ImageStoreMut<'a> = ImageStoreMut<'a, f16, 4>;
#[cfg(feature = "nightly_f16")]
pub type RgbF16ImageStore<'a> = ImageStore<'a, f16, 3>;
#[cfg(feature = "nightly_f16")]
pub type RgbF16ImageStoreMut<'a> = ImageStoreMut<'a, f16, 3>;

// --- Single-precision (`f32`) store aliases ---
pub type PlanarF32ImageStore<'a> = ImageStore<'a, f32, 1>;
pub type PlanarF32ImageStoreMut<'a> = ImageStoreMut<'a, f32, 1>;
pub type CbCrF32ImageStore<'a> = ImageStore<'a, f32, 2>;
pub type CbCrF32ImageStoreMut<'a> = ImageStoreMut<'a, f32, 2>;
pub type GrayAlphaF32ImageStore<'a> = ImageStore<'a, f32, 2>;
pub type GrayAlphaF32ImageStoreMut<'a> = ImageStoreMut<'a, f32, 2>;
pub type RgbaF32ImageStore<'a> = ImageStore<'a, f32, 4>;
pub type RgbaF32ImageStoreMut<'a> = ImageStoreMut<'a, f32, 4>;
pub type RgbF32ImageStore<'a> = ImageStore<'a, f32, 3>;
pub type RgbF32ImageStoreMut<'a> = ImageStoreMut<'a, f32, 3>;
966
#[cfg(test)]
mod tests {
    use super::*;

    /// A single non-opaque alpha byte must trigger premultiplication.
    #[test]
    fn image_store_alpha_test_rgba8() {
        let image_size = 256usize;
        let mut image = vec![0u8; image_size * image_size * 4];
        // Set one alpha component (channel 3 of pixel 150) to a non-max value.
        image[3 + 150 * 4] = 75;
        let store = ImageStore::<u8, 4>::from_slice(&image, image_size, image_size).unwrap();
        // `assert!` over `assert_eq!(true, ..)` (clippy: bool_assert_comparison).
        assert!(store.is_alpha_premultiplication_needed());
    }

    /// A fully opaque image needs no premultiplication.
    #[test]
    fn check_alpha_not_exists_rgba8() {
        let image_size = 256usize;
        let image = vec![255u8; image_size * image_size * 4];
        let store = ImageStore::<u8, 4>::from_slice(&image, image_size, image_size).unwrap();
        assert!(!store.is_alpha_premultiplication_needed());
    }
}