1use crate::alpha_check::has_non_constant_cap_alpha_rgba_f32;
30#[cfg(feature = "nightly_f16")]
31use crate::alpha_handle_f16::{premultiply_alpha_rgba_f16, unpremultiply_alpha_rgba_f16};
32use crate::alpha_handle_f32::{premultiply_alpha_rgba_f32, unpremultiply_alpha_rgba_f32};
33use crate::alpha_handle_u8::{premultiply_alpha_rgba, unpremultiply_alpha_rgba};
34use crate::alpha_handle_u16::{premultiply_alpha_rgba_u16, unpremultiply_alpha_rgba_u16};
35use crate::pic_scale_error::{PicScaleBufferMismatch, PicScaleError};
36use crate::{ImageSize, WorkloadStrategy};
37#[cfg(feature = "nightly_f16")]
38use core::f16;
39use std::fmt::Debug;
40
/// Immutable image storage.
///
/// Holds pixel data either borrowed or owned (via [`std::borrow::Cow`]),
/// together with the geometry needed to interpret it. `N` is the
/// compile-time channel count.
#[derive(Debug, Clone)]
pub struct ImageStore<'a, T, const N: usize>
where
    T: Clone + Copy + Debug,
{
    /// Pixel data: a borrowed slice or an owned vector.
    pub buffer: std::borrow::Cow<'a, [T]>,
    /// Runtime channel count; mirrors the const generic `N`.
    pub channels: usize,
    /// Image width in pixels.
    pub width: usize,
    /// Image height in pixels.
    pub height: usize,
    /// Row stride in elements of `T`; `0` is treated as tightly packed
    /// (`width * N`) by `stride()`.
    pub stride: usize,
    /// Bit depth of the stored samples; constructors default it to `0`.
    pub bit_depth: usize,
}
67
/// Mutable image storage.
///
/// Same geometry as [`ImageStore`], but the pixel data is held in a
/// [`BufferStore`] so it can be mutated in place whether borrowed or owned.
#[derive(Debug)]
pub struct ImageStoreMut<'a, T, const N: usize>
where
    T: Clone + Copy + Debug,
{
    /// Pixel data: a mutably borrowed slice or an owned vector.
    pub buffer: BufferStore<'a, T>,
    /// Runtime channel count; mirrors the const generic `N`.
    pub channels: usize,
    /// Image width in pixels.
    pub width: usize,
    /// Image height in pixels.
    pub height: usize,
    /// Row stride in elements of `T`; `0` is treated as tightly packed
    /// (`width * N`) by `stride()`.
    pub stride: usize,
    /// Bit depth of the stored samples; constructors default it to `0`
    /// except `alloc_with_depth`.
    pub bit_depth: usize,
}
94
/// Internal query: does this store's sample type need an explicit bit depth?
/// Per the impls below, `u16` stores do; `u8`, `f32` and `f16` do not.
pub(crate) trait CheckStoreDensity {
    fn should_have_bit_depth(&self) -> bool;
}
98
/// Backing storage for [`ImageStoreMut`]: either a mutable borrow of caller
/// memory or an owned vector.
#[derive(Debug)]
pub enum BufferStore<'a, T: Copy + Debug> {
    /// Mutably borrowed caller-provided slice.
    Borrowed(&'a mut [T]),
    /// Owned heap allocation.
    Owned(Vec<T>),
}
105
106impl<T: Copy + Debug> BufferStore<'_, T> {
107 #[allow(clippy::should_implement_trait)]
108 pub fn borrow(&self) -> &[T] {
110 match self {
111 Self::Borrowed(p_ref) => p_ref,
112 Self::Owned(vec) => vec,
113 }
114 }
115
116 #[allow(clippy::should_implement_trait)]
117 pub fn borrow_mut(&mut self) -> &mut [T] {
119 match self {
120 Self::Borrowed(p_ref) => p_ref,
121 Self::Owned(vec) => vec,
122 }
123 }
124}
125
126impl<'a, T, const N: usize> ImageStore<'a, T, N>
127where
128 T: Clone + Copy + Debug + Default,
129{
130 pub fn new(
132 slice_ref: Vec<T>,
133 width: usize,
134 height: usize,
135 ) -> Result<ImageStore<'a, T, N>, PicScaleError> {
136 let expected_size = width * height * N;
137 if slice_ref.len() != width * height * N {
138 return Err(PicScaleError::BufferMismatch(PicScaleBufferMismatch {
139 expected: expected_size,
140 width,
141 height,
142 channels: N,
143 slice_len: slice_ref.len(),
144 }));
145 }
146 Ok(ImageStore::<T, N> {
147 buffer: std::borrow::Cow::Owned(slice_ref),
148 channels: N,
149 width,
150 height,
151 stride: width * N,
152 bit_depth: 0,
153 })
154 }
155
156 pub fn borrow(
158 slice_ref: &'a [T],
159 width: usize,
160 height: usize,
161 ) -> Result<ImageStore<'a, T, N>, PicScaleError> {
162 let expected_size = width * height * N;
163 if slice_ref.len() != width * height * N {
164 return Err(PicScaleError::BufferMismatch(PicScaleBufferMismatch {
165 expected: expected_size,
166 width,
167 height,
168 channels: N,
169 slice_len: slice_ref.len(),
170 }));
171 }
172 Ok(ImageStore::<T, N> {
173 buffer: std::borrow::Cow::Borrowed(slice_ref),
174 channels: N,
175 width,
176 height,
177 stride: width * N,
178 bit_depth: 0,
179 })
180 }
181
182 pub fn alloc(width: usize, height: usize) -> ImageStore<'a, T, N> {
184 let vc = vec![T::default(); width * N * height];
185 ImageStore::<T, N> {
186 buffer: std::borrow::Cow::Owned(vc),
187 channels: N,
188 width,
189 height,
190 stride: width * N,
191 bit_depth: 0,
192 }
193 }
194}
195
/// `u8` stores do not require an explicit bit depth.
impl<const N: usize> CheckStoreDensity for ImageStoreMut<'_, u8, N> {
    fn should_have_bit_depth(&self) -> bool {
        false
    }
}
201
/// `f32` stores do not require an explicit bit depth.
impl<const N: usize> CheckStoreDensity for ImageStoreMut<'_, f32, N> {
    fn should_have_bit_depth(&self) -> bool {
        false
    }
}
207
/// `f16` stores (nightly-only sample type) do not require an explicit bit depth.
#[cfg(feature = "nightly_f16")]
impl<const N: usize> CheckStoreDensity for ImageStoreMut<'_, f16, N> {
    fn should_have_bit_depth(&self) -> bool {
        false
    }
}
214
/// `u16` stores must carry an explicit bit depth (e.g. 10/12/16-bit content).
impl<const N: usize> CheckStoreDensity for ImageStoreMut<'_, u16, N> {
    fn should_have_bit_depth(&self) -> bool {
        true
    }
}
220
221impl<T, const N: usize> ImageStoreMut<'_, T, N>
222where
223 T: Clone + Copy + Debug + Default,
224{
225 pub(crate) fn validate(&self) -> Result<(), PicScaleError> {
226 let expected_size = self.stride() * self.height;
227 if self.buffer.borrow().len() != self.stride() * self.height {
228 return Err(PicScaleError::BufferMismatch(PicScaleBufferMismatch {
229 expected: expected_size,
230 width: self.width,
231 height: self.height,
232 channels: N,
233 slice_len: self.buffer.borrow().len(),
234 }));
235 }
236 if self.stride < self.width * N {
237 return Err(PicScaleError::InvalidStride(self.width * N, self.stride));
238 }
239 Ok(())
240 }
241}
242
243impl<T, const N: usize> ImageStore<'_, T, N>
244where
245 T: Clone + Copy + Debug + Default,
246{
247 pub(crate) fn validate(&self) -> Result<(), PicScaleError> {
248 let expected_size = self.stride() * self.height;
249 if self.buffer.as_ref().len() != self.stride() * self.height {
250 return Err(PicScaleError::BufferMismatch(PicScaleBufferMismatch {
251 expected: expected_size,
252 width: self.width,
253 height: self.height,
254 channels: N,
255 slice_len: self.buffer.as_ref().len(),
256 }));
257 }
258 if self.stride < self.width * N {
259 return Err(PicScaleError::InvalidStride(self.width * N, self.stride));
260 }
261 Ok(())
262 }
263}
264
265impl<'a, T, const N: usize> ImageStoreMut<'a, T, N>
266where
267 T: Clone + Copy + Debug + Default,
268{
269 pub fn new(
273 slice_ref: Vec<T>,
274 width: usize,
275 height: usize,
276 ) -> Result<ImageStoreMut<'a, T, N>, PicScaleError> {
277 let expected_size = width * height * N;
278 if slice_ref.len() != width * height * N {
279 return Err(PicScaleError::BufferMismatch(PicScaleBufferMismatch {
280 expected: expected_size,
281 width,
282 height,
283 channels: N,
284 slice_len: slice_ref.len(),
285 }));
286 }
287 Ok(ImageStoreMut::<T, N> {
288 buffer: BufferStore::Owned(slice_ref),
289 channels: N,
290 width,
291 height,
292 stride: width * N,
293 bit_depth: 0,
294 })
295 }
296
297 pub fn borrow(
301 slice_ref: &'a mut [T],
302 width: usize,
303 height: usize,
304 ) -> Result<ImageStoreMut<'a, T, N>, PicScaleError> {
305 let expected_size = width * height * N;
306 if slice_ref.len() != width * height * N {
307 return Err(PicScaleError::BufferMismatch(PicScaleBufferMismatch {
308 expected: expected_size,
309 width,
310 height,
311 channels: N,
312 slice_len: slice_ref.len(),
313 }));
314 }
315 Ok(ImageStoreMut::<T, N> {
316 buffer: BufferStore::Borrowed(slice_ref),
317 channels: N,
318 width,
319 height,
320 stride: width * N,
321 bit_depth: 0,
322 })
323 }
324
325 pub fn alloc(width: usize, height: usize) -> ImageStoreMut<'a, T, N> {
329 let vc = vec![T::default(); width * N * height];
330 ImageStoreMut::<T, N> {
331 buffer: BufferStore::Owned(vc),
332 channels: N,
333 width,
334 height,
335 stride: width * N,
336 bit_depth: 0,
337 }
338 }
339
340 pub fn alloc_with_depth(
342 width: usize,
343 height: usize,
344 bit_depth: usize,
345 ) -> ImageStoreMut<'a, T, N> {
346 let vc = vec![T::default(); width * N * height];
347 ImageStoreMut::<T, N> {
348 buffer: BufferStore::Owned(vc),
349 channels: N,
350 width,
351 height,
352 stride: width * N,
353 bit_depth,
354 }
355 }
356}
357
358impl<T, const N: usize> ImageStoreMut<'_, T, N>
359where
360 T: Clone + Copy + Debug,
361{
362 #[inline]
366 pub fn stride(&self) -> usize {
367 if self.stride == 0 {
368 return self.width * N;
369 }
370 self.stride
371 }
372}
373
374impl<T, const N: usize> ImageStore<'_, T, N>
375where
376 T: Clone + Copy + Debug,
377{
378 #[inline]
382 pub fn stride(&self) -> usize {
383 if self.stride == 0 {
384 return self.width * N;
385 }
386 self.stride
387 }
388}
389
390impl<'a, T, const N: usize> ImageStore<'a, T, N>
391where
392 T: Clone + Copy + Debug,
393{
394 pub fn get_size(&self) -> ImageSize {
396 ImageSize::new(self.width, self.height)
397 }
398
399 pub fn as_bytes(&self) -> &[T] {
401 match &self.buffer {
402 std::borrow::Cow::Borrowed(br) => br,
403 std::borrow::Cow::Owned(v) => v.as_ref(),
404 }
405 }
406
407 pub fn from_slice(
409 slice_ref: &'a [T],
410 width: usize,
411 height: usize,
412 ) -> Result<ImageStore<'a, T, N>, PicScaleError> {
413 let expected_size = width * height * N;
414 if slice_ref.len() != width * height * N {
415 return Err(PicScaleError::BufferMismatch(PicScaleBufferMismatch {
416 expected: expected_size,
417 width,
418 height,
419 channels: N,
420 slice_len: slice_ref.len(),
421 }));
422 }
423 Ok(ImageStore::<T, N> {
424 buffer: std::borrow::Cow::Borrowed(slice_ref),
425 channels: N,
426 width,
427 height,
428 stride: width * N,
429 bit_depth: 0,
430 })
431 }
432
433 pub fn copied<'b>(&self) -> ImageStore<'b, T, N> {
435 ImageStore::<T, N> {
436 buffer: std::borrow::Cow::Owned(self.buffer.as_ref().to_vec()),
437 channels: N,
438 width: self.width,
439 height: self.height,
440 stride: self.width * N,
441 bit_depth: self.bit_depth,
442 }
443 }
444
445 pub fn copied_to_mut(&self, into: &mut ImageStoreMut<T, N>) {
447 let into_stride = into.stride();
448 for (src_row, dst_row) in self
449 .buffer
450 .as_ref()
451 .chunks_exact(self.stride())
452 .zip(into.buffer.borrow_mut().chunks_exact_mut(into_stride))
453 {
454 for (&src, dst) in src_row.iter().zip(dst_row.iter_mut()) {
455 *dst = src;
456 }
457 }
458 }
459}
460
461impl<'a, T, const N: usize> ImageStoreMut<'a, T, N>
462where
463 T: Clone + Copy + Debug,
464{
465 pub fn get_size(&self) -> ImageSize {
467 ImageSize::new(self.width, self.height)
468 }
469
470 pub fn as_bytes(&self) -> &[T] {
472 match &self.buffer {
473 BufferStore::Borrowed(p) => p,
474 BufferStore::Owned(v) => v,
475 }
476 }
477
478 pub fn from_slice(
480 slice_ref: &'a mut [T],
481 width: usize,
482 height: usize,
483 ) -> Result<ImageStoreMut<'a, T, N>, PicScaleError> {
484 let expected_size = width * height * N;
485 if slice_ref.len() != width * height * N {
486 return Err(PicScaleError::BufferMismatch(PicScaleBufferMismatch {
487 expected: expected_size,
488 width,
489 height,
490 channels: N,
491 slice_len: slice_ref.len(),
492 }));
493 }
494 Ok(ImageStoreMut::<T, N> {
495 buffer: BufferStore::Borrowed(slice_ref),
496 channels: N,
497 width,
498 height,
499 stride: width * N,
500 bit_depth: 0,
501 })
502 }
503
504 pub fn copied<'b>(&self) -> ImageStoreMut<'b, T, N> {
506 ImageStoreMut::<T, N> {
507 buffer: BufferStore::Owned(self.buffer.borrow().to_vec()),
508 channels: N,
509 width: self.width,
510 height: self.height,
511 stride: self.width * N,
512 bit_depth: self.bit_depth,
513 }
514 }
515
516 pub fn to_immutable(&self) -> ImageStore<'_, T, N> {
518 ImageStore::<T, N> {
519 buffer: std::borrow::Cow::Owned(self.buffer.borrow().to_owned()),
520 channels: N,
521 width: self.width,
522 height: self.height,
523 stride: self.width * N,
524 bit_depth: self.bit_depth,
525 }
526 }
527}
528
/// Forward alpha association (premultiplication) from an immutable source
/// store into a mutable destination store.
pub(crate) trait AssociateAlpha<T: Clone + Copy + Debug, const N: usize> {
    /// Writes a premultiplied copy of `self` into `into` using `pool`.
    fn premultiply_alpha(&self, into: &mut ImageStoreMut<'_, T, N>, pool: &novtb::ThreadPool);
    /// Returns `true` when premultiplication would actually be required for
    /// this image's alpha content.
    fn is_alpha_premultiplication_needed(&self) -> bool;
}
533
/// In-place reversal of alpha association (unpremultiplication) on a mutable
/// store.
pub(crate) trait UnassociateAlpha<T: Clone + Copy + Debug, const N: usize> {
    /// Unpremultiplies the store's pixels in place using `pool`; some
    /// implementations honor `workload_strategy`, others ignore it.
    fn unpremultiply_alpha(
        &mut self,
        pool: &novtb::ThreadPool,
        workload_strategy: WorkloadStrategy,
    );
}
541
/// Alpha premultiplication for 8-bit gray+alpha (2-channel) images.
impl AssociateAlpha<u8, 2> for ImageStore<'_, u8, 2> {
    fn premultiply_alpha(&self, into: &mut ImageStoreMut<'_, u8, 2>, pool: &novtb::ThreadPool) {
        // Read the destination stride before mutably borrowing its buffer.
        let dst_stride = into.stride();
        let dst = into.buffer.borrow_mut();
        let src = self.buffer.as_ref();
        use crate::alpha_handle_u8::premultiply_alpha_gray_alpha;
        premultiply_alpha_gray_alpha(
            dst,
            dst_stride,
            src,
            self.width,
            self.height,
            self.stride(),
            pool,
        );
    }

    fn is_alpha_premultiplication_needed(&self) -> bool {
        // Delegates the alpha scan to alpha_check; the helper name suggests it
        // detects an alpha channel that is not uniformly at full value.
        use crate::alpha_check::has_non_constant_cap_alpha_gray_alpha8;
        has_non_constant_cap_alpha_gray_alpha8(self.buffer.as_ref(), self.width, self.stride())
    }
}
564
/// Alpha premultiplication for 16-bit gray+alpha (2-channel) images.
impl AssociateAlpha<u16, 2> for ImageStore<'_, u16, 2> {
    fn premultiply_alpha(&self, into: &mut ImageStoreMut<'_, u16, 2>, pool: &novtb::ThreadPool) {
        // Read the destination stride before mutably borrowing its buffer.
        let dst_stride = into.stride();
        let dst = into.buffer.borrow_mut();
        let src = self.buffer.as_ref();
        use crate::alpha_handle_u16::premultiply_alpha_gray_alpha_u16;
        // The destination's bit_depth is forwarded to the handler.
        premultiply_alpha_gray_alpha_u16(
            dst,
            dst_stride,
            src,
            self.width,
            self.height,
            self.stride(),
            into.bit_depth,
            pool,
        );
    }

    fn is_alpha_premultiplication_needed(&self) -> bool {
        use crate::alpha_check::has_non_constant_cap_alpha_gray_alpha16;
        has_non_constant_cap_alpha_gray_alpha16(self.buffer.as_ref(), self.width, self.stride())
    }
}
588
/// Alpha premultiplication for f32 gray+alpha (2-channel) images.
impl AssociateAlpha<f32, 2> for ImageStore<'_, f32, 2> {
    fn premultiply_alpha(&self, into: &mut ImageStoreMut<'_, f32, 2>, pool: &novtb::ThreadPool) {
        // Read the destination stride before mutably borrowing its buffer.
        let dst_stride = into.stride();
        let dst = into.buffer.borrow_mut();
        let src = self.buffer.as_ref();
        use crate::alpha_handle_f32::premultiply_alpha_gray_alpha_f32;
        // NOTE(review): argument order differs from the integer variants —
        // here the source stride precedes width/height; verify against the
        // handler's signature in alpha_handle_f32.
        premultiply_alpha_gray_alpha_f32(
            dst,
            dst_stride,
            src,
            self.stride(),
            self.width,
            self.height,
            pool,
        );
    }

    fn is_alpha_premultiplication_needed(&self) -> bool {
        use crate::alpha_check::has_non_constant_cap_alpha_gray_alpha_f32;
        has_non_constant_cap_alpha_gray_alpha_f32(self.buffer.as_ref(), self.width, self.stride())
    }
}
611
/// Alpha premultiplication for 8-bit RGBA images, with architecture-specific
/// alpha-scan implementations selected at compile time (and, on x86, at
/// runtime via feature detection).
impl AssociateAlpha<u8, 4> for ImageStore<'_, u8, 4> {
    fn premultiply_alpha(&self, into: &mut ImageStoreMut<'_, u8, 4>, pool: &novtb::ThreadPool) {
        // Read the destination stride before mutably borrowing its buffer.
        let dst_stride = into.stride();
        let dst = into.buffer.borrow_mut();
        let src = self.buffer.as_ref();
        premultiply_alpha_rgba(
            dst,
            dst_stride,
            src,
            self.width,
            self.height,
            self.stride(),
            pool,
        );
    }

    // Portable fallback: neither x86 nor aarch64+NEON.
    #[cfg(not(any(
        any(target_arch = "x86_64", target_arch = "x86"),
        all(target_arch = "aarch64", target_feature = "neon")
    )))]
    fn is_alpha_premultiplication_needed(&self) -> bool {
        use crate::alpha_check::has_non_constant_cap_alpha_rgba8;
        has_non_constant_cap_alpha_rgba8(self.buffer.as_ref(), self.width, self.stride())
    }

    // NEON path, selected at compile time when the target guarantees NEON.
    #[cfg(all(target_arch = "aarch64", target_feature = "neon"))]
    fn is_alpha_premultiplication_needed(&self) -> bool {
        use crate::neon::neon_has_non_constant_cap_alpha_rgba8;
        neon_has_non_constant_cap_alpha_rgba8(self.buffer.as_ref(), self.width, self.stride())
    }

    // x86 path: runtime dispatch, best ISA first (AVX-512 > AVX2 > SSE4.1),
    // falling back to the scalar check.
    #[cfg(any(target_arch = "x86_64", target_arch = "x86"))]
    fn is_alpha_premultiplication_needed(&self) -> bool {
        use crate::alpha_check::has_non_constant_cap_alpha_rgba8;
        #[cfg(feature = "sse")]
        use crate::sse::sse_has_non_constant_cap_alpha_rgba8;
        #[cfg(all(target_arch = "x86_64", feature = "nightly_avx512"))]
        if std::arch::is_x86_feature_detected!("avx512bw") {
            use crate::avx512::avx512_has_non_constant_cap_alpha_rgba8;
            return avx512_has_non_constant_cap_alpha_rgba8(
                self.buffer.as_ref(),
                self.width,
                self.stride(),
            );
        }
        #[cfg(all(target_arch = "x86_64", feature = "avx"))]
        if std::arch::is_x86_feature_detected!("avx2") {
            use crate::avx2::avx_has_non_constant_cap_alpha_rgba8;
            return avx_has_non_constant_cap_alpha_rgba8(
                self.buffer.as_ref(),
                self.width,
                self.stride(),
            );
        }
        #[cfg(feature = "sse")]
        if std::arch::is_x86_feature_detected!("sse4.1") {
            return sse_has_non_constant_cap_alpha_rgba8(
                self.buffer.as_ref(),
                self.width,
                self.stride(),
            );
        }
        has_non_constant_cap_alpha_rgba8(self.buffer.as_ref(), self.width, self.stride())
    }
}
677
/// In-place alpha unpremultiplication for 8-bit RGBA images.
impl UnassociateAlpha<u8, 4> for ImageStoreMut<'_, u8, 4> {
    fn unpremultiply_alpha(
        &mut self,
        pool: &novtb::ThreadPool,
        workload_strategy: WorkloadStrategy,
    ) {
        // Read the stride before mutably borrowing the buffer.
        let src_stride = self.stride();
        let dst = self.buffer.borrow_mut();
        unpremultiply_alpha_rgba(
            dst,
            self.width,
            self.height,
            src_stride,
            pool,
            workload_strategy,
        );
    }
}
696
/// In-place alpha unpremultiplication for 8-bit gray+alpha images.
impl UnassociateAlpha<u8, 2> for ImageStoreMut<'_, u8, 2> {
    fn unpremultiply_alpha(
        &mut self,
        pool: &novtb::ThreadPool,
        workload_strategy: WorkloadStrategy,
    ) {
        // Read the stride before mutably borrowing the buffer.
        let src_stride = self.stride();
        let dst = self.buffer.borrow_mut();
        use crate::alpha_handle_u8::unpremultiply_alpha_gray_alpha;
        unpremultiply_alpha_gray_alpha(
            dst,
            self.width,
            self.height,
            src_stride,
            pool,
            workload_strategy,
        );
    }
}
716
/// In-place alpha unpremultiplication for f32 gray+alpha images.
/// The workload strategy is ignored by this float path.
impl UnassociateAlpha<f32, 2> for ImageStoreMut<'_, f32, 2> {
    fn unpremultiply_alpha(&mut self, pool: &novtb::ThreadPool, _: WorkloadStrategy) {
        // Read the stride before mutably borrowing the buffer.
        let src_stride = self.stride();
        let dst = self.buffer.borrow_mut();
        use crate::alpha_handle_f32::unpremultiply_alpha_gray_alpha_f32;
        unpremultiply_alpha_gray_alpha_f32(dst, src_stride, self.width, self.height, pool);
    }
}
725
/// In-place alpha unpremultiplication for 16-bit gray+alpha images.
/// The workload strategy is ignored; the store's own bit depth is forwarded.
impl UnassociateAlpha<u16, 2> for ImageStoreMut<'_, u16, 2> {
    fn unpremultiply_alpha(&mut self, pool: &novtb::ThreadPool, _: WorkloadStrategy) {
        // Read the stride before mutably borrowing the buffer.
        let src_stride = self.stride();
        let dst = self.buffer.borrow_mut();
        use crate::alpha_handle_u16::unpremultiply_alpha_gray_alpha_u16;
        unpremultiply_alpha_gray_alpha_u16(
            dst,
            src_stride,
            self.width,
            self.height,
            self.bit_depth,
            pool,
        );
    }
}
741
/// Alpha premultiplication for 16-bit RGBA images, with architecture-specific
/// alpha-scan implementations (compile-time on aarch64, runtime-dispatched on
/// x86).
impl AssociateAlpha<u16, 4> for ImageStore<'_, u16, 4> {
    fn premultiply_alpha(&self, into: &mut ImageStoreMut<'_, u16, 4>, pool: &novtb::ThreadPool) {
        // Read the destination stride before mutably borrowing its buffer.
        let dst_stride = into.stride();
        let dst = into.buffer.borrow_mut();
        let src = self.buffer.as_ref();
        // The destination's bit_depth is forwarded to the handler.
        premultiply_alpha_rgba_u16(
            dst,
            dst_stride,
            src,
            self.width,
            self.height,
            self.stride(),
            into.bit_depth,
            pool,
        );
    }

    // Portable fallback: neither x86 nor aarch64+NEON.
    #[cfg(not(any(
        any(target_arch = "x86_64", target_arch = "x86"),
        all(target_arch = "aarch64", target_feature = "neon")
    )))]
    fn is_alpha_premultiplication_needed(&self) -> bool {
        use crate::alpha_check::has_non_constant_cap_alpha_rgba16;
        has_non_constant_cap_alpha_rgba16(self.buffer.as_ref(), self.width, self.stride())
    }

    // NEON path, selected at compile time when the target guarantees NEON.
    #[cfg(all(target_arch = "aarch64", target_feature = "neon"))]
    fn is_alpha_premultiplication_needed(&self) -> bool {
        use crate::neon::neon_has_non_constant_cap_alpha_rgba16;
        neon_has_non_constant_cap_alpha_rgba16(self.buffer.as_ref(), self.width, self.stride())
    }

    // x86 path: runtime dispatch, AVX2 then SSE4.1, falling back to scalar.
    #[cfg(any(target_arch = "x86_64", target_arch = "x86"))]
    fn is_alpha_premultiplication_needed(&self) -> bool {
        use crate::alpha_check::has_non_constant_cap_alpha_rgba16;
        #[cfg(feature = "sse")]
        use crate::sse::sse_has_non_constant_cap_alpha_rgba16;
        #[cfg(all(target_arch = "x86_64", feature = "avx"))]
        if std::arch::is_x86_feature_detected!("avx2") {
            use crate::avx2::avx_has_non_constant_cap_alpha_rgba16;
            return avx_has_non_constant_cap_alpha_rgba16(
                self.buffer.as_ref(),
                self.width,
                self.stride(),
            );
        }
        #[cfg(feature = "sse")]
        if std::arch::is_x86_feature_detected!("sse4.1") {
            return sse_has_non_constant_cap_alpha_rgba16(
                self.buffer.as_ref(),
                self.width,
                self.stride(),
            );
        }
        has_non_constant_cap_alpha_rgba16(self.buffer.as_ref(), self.width, self.stride())
    }
}
799
/// Alpha premultiplication for f32 RGBA images.
impl AssociateAlpha<f32, 4> for ImageStore<'_, f32, 4> {
    fn premultiply_alpha(&self, into: &mut ImageStoreMut<'_, f32, 4>, pool: &novtb::ThreadPool) {
        let src_stride = self.stride();
        // Read the destination stride before mutably borrowing its buffer.
        let dst_stride = into.stride();
        let dst = into.buffer.borrow_mut();
        let src = self.buffer.as_ref();
        premultiply_alpha_rgba_f32(
            dst,
            dst_stride,
            src,
            src_stride,
            self.width,
            self.height,
            pool,
        );
    }

    fn is_alpha_premultiplication_needed(&self) -> bool {
        has_non_constant_cap_alpha_rgba_f32(self.buffer.as_ref(), self.width, self.stride())
    }
}
821
/// Alpha premultiplication for f16 RGBA images (nightly-only).
#[cfg(feature = "nightly_f16")]
impl AssociateAlpha<f16, 4> for ImageStore<'_, f16, 4> {
    fn premultiply_alpha(&self, into: &mut ImageStoreMut<'_, f16, 4>, pool: &novtb::ThreadPool) {
        let src_stride = self.stride();
        // Read the destination stride before mutably borrowing its buffer.
        let dst_stride = into.stride();
        let dst = into.buffer.borrow_mut();
        let src = self.buffer.as_ref();
        premultiply_alpha_rgba_f16(
            dst,
            dst_stride,
            src,
            src_stride,
            self.width,
            self.height,
            pool,
        );
    }

    fn is_alpha_premultiplication_needed(&self) -> bool {
        // Unlike the other formats, no alpha scan is performed for f16:
        // premultiplication is always reported as needed.
        true
    }
}
844
/// In-place alpha unpremultiplication for 16-bit RGBA images.
/// The workload strategy is ignored; the store's own bit depth is forwarded.
impl UnassociateAlpha<u16, 4> for ImageStoreMut<'_, u16, 4> {
    fn unpremultiply_alpha(&mut self, pool: &novtb::ThreadPool, _: WorkloadStrategy) {
        // Read the stride before mutably borrowing the buffer.
        let src_stride = self.stride();
        let in_place = self.buffer.borrow_mut();
        unpremultiply_alpha_rgba_u16(
            in_place,
            src_stride,
            self.width,
            self.height,
            self.bit_depth,
            pool,
        );
    }
}
859
/// In-place alpha unpremultiplication for f32 RGBA images.
/// The workload strategy is ignored by this float path.
impl UnassociateAlpha<f32, 4> for ImageStoreMut<'_, f32, 4> {
    fn unpremultiply_alpha(&mut self, pool: &novtb::ThreadPool, _: WorkloadStrategy) {
        // Read the stride before mutably borrowing the buffer.
        let stride = self.stride();
        let dst = self.buffer.borrow_mut();
        unpremultiply_alpha_rgba_f32(dst, stride, self.width, self.height, pool);
    }
}
867
/// In-place alpha unpremultiplication for f16 RGBA images (nightly-only).
/// The workload strategy is ignored by this float path.
#[cfg(feature = "nightly_f16")]
impl UnassociateAlpha<f16, 4> for ImageStoreMut<'_, f16, 4> {
    fn unpremultiply_alpha(&mut self, pool: &novtb::ThreadPool, _: WorkloadStrategy) {
        // Read the stride before mutably borrowing the buffer.
        let stride = self.stride();
        let dst = self.buffer.borrow_mut();
        unpremultiply_alpha_rgba_f16(dst, stride, self.width, self.height, pool);
    }
}
876
// Convenience aliases for the common pixel layouts, grouped by sample type.
// Naming: <layout><depth>ImageStore[Mut], where the channel count is encoded
// in the const generic (1 = planar, 2 = two-channel, 3 = RGB, 4 = RGBA).

// 8-bit integer stores.
pub type Planar8ImageStore<'a> = ImageStore<'a, u8, 1>;
pub type Planar8ImageStoreMut<'a> = ImageStoreMut<'a, u8, 1>;
pub type CbCr8ImageStore<'a> = ImageStore<'a, u8, 2>;
pub type CbCr8ImageStoreMut<'a> = ImageStoreMut<'a, u8, 2>;
pub type GrayAlpha8ImageStore<'a> = ImageStore<'a, u8, 2>;
pub type GrayAlpha8ImageStoreMut<'a> = ImageStoreMut<'a, u8, 2>;
pub type Rgba8ImageStore<'a> = ImageStore<'a, u8, 4>;
pub type Rgba8ImageStoreMut<'a> = ImageStoreMut<'a, u8, 4>;
pub type Rgb8ImageStore<'a> = ImageStore<'a, u8, 3>;
pub type Rgb8ImageStoreMut<'a> = ImageStoreMut<'a, u8, 3>;

// 16-bit integer stores (bit depth is carried separately in the store).
pub type Planar16ImageStore<'a> = ImageStore<'a, u16, 1>;
pub type Planar16ImageStoreMut<'a> = ImageStoreMut<'a, u16, 1>;
pub type CbCr16ImageStore<'a> = ImageStore<'a, u16, 2>;
pub type CbCr16ImageStoreMut<'a> = ImageStoreMut<'a, u16, 2>;
pub type GrayAlpha16ImageStore<'a> = ImageStore<'a, u16, 2>;
pub type GrayAlpha16ImageStoreMut<'a> = ImageStoreMut<'a, u16, 2>;
pub type Rgba16ImageStore<'a> = ImageStore<'a, u16, 4>;
pub type Rgba16ImageStoreMut<'a> = ImageStoreMut<'a, u16, 4>;
pub type Rgb16ImageStore<'a> = ImageStore<'a, u16, 3>;
pub type Rgb16ImageStoreMut<'a> = ImageStoreMut<'a, u16, 3>;

// f16 stores (nightly-only sample type).
#[cfg(feature = "nightly_f16")]
pub type PlanarF16ImageStore<'a> = ImageStore<'a, f16, 1>;
#[cfg(feature = "nightly_f16")]
pub type PlanarF16ImageStoreMut<'a> = ImageStoreMut<'a, f16, 1>;
#[cfg(feature = "nightly_f16")]
pub type CbCrF16ImageStore<'a> = ImageStore<'a, f16, 2>;
#[cfg(feature = "nightly_f16")]
pub type CbCrF16ImageStoreMut<'a> = ImageStoreMut<'a, f16, 2>;
#[cfg(feature = "nightly_f16")]
pub type RgbaF16ImageStore<'a> = ImageStore<'a, f16, 4>;
#[cfg(feature = "nightly_f16")]
pub type RgbaF16ImageStoreMut<'a> = ImageStoreMut<'a, f16, 4>;
#[cfg(feature = "nightly_f16")]
pub type RgbF16ImageStore<'a> = ImageStore<'a, f16, 3>;
#[cfg(feature = "nightly_f16")]
pub type RgbF16ImageStoreMut<'a> = ImageStoreMut<'a, f16, 3>;

// f32 stores.
pub type PlanarF32ImageStore<'a> = ImageStore<'a, f32, 1>;
pub type PlanarF32ImageStoreMut<'a> = ImageStoreMut<'a, f32, 1>;
pub type CbCrF32ImageStore<'a> = ImageStore<'a, f32, 2>;
pub type CbCrF32ImageStoreMut<'a> = ImageStoreMut<'a, f32, 2>;
pub type GrayAlphaF32ImageStore<'a> = ImageStore<'a, f32, 2>;
pub type GrayAlphaF32ImageStoreMut<'a> = ImageStoreMut<'a, f32, 2>;
pub type RgbaF32ImageStore<'a> = ImageStore<'a, f32, 4>;
pub type RgbaF32ImageStoreMut<'a> = ImageStoreMut<'a, f32, 4>;
pub type RgbF32ImageStore<'a> = ImageStore<'a, f32, 3>;
pub type RgbF32ImageStoreMut<'a> = ImageStoreMut<'a, f32, 3>;
926
#[cfg(test)]
mod tests {
    use super::*;

    /// A single non-opaque alpha byte in an otherwise constant image must
    /// trigger premultiplication.
    #[test]
    fn image_store_alpha_test_rgba8() {
        let image_size = 256usize;
        let mut image = vec![0u8; image_size * image_size * 4];
        // Set the alpha channel of pixel 150 in the first row.
        image[3 + 150 * 4] = 75;
        let store = ImageStore::<u8, 4>::from_slice(&image, image_size, image_size).unwrap();
        let has_alpha = store.is_alpha_premultiplication_needed();
        // assert! over assert_eq!(true, ..) — clippy::bool_assert_comparison.
        assert!(has_alpha);
    }

    /// A fully opaque image requires no premultiplication.
    #[test]
    fn check_alpha_not_exists_rgba8() {
        let image_size = 256usize;
        let image = vec![255u8; image_size * image_size * 4];
        let store = ImageStore::<u8, 4>::from_slice(&image, image_size, image_size).unwrap();
        let has_alpha = store.is_alpha_premultiplication_needed();
        assert!(!has_alpha);
    }
}