// pic_scale — image_store.rs
1/*
2 * Copyright (c) Radzivon Bartoshyk. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without modification,
5 * are permitted provided that the following conditions are met:
6 *
7 * 1.  Redistributions of source code must retain the above copyright notice, this
8 * list of conditions and the following disclaimer.
9 *
10 * 2.  Redistributions in binary form must reproduce the above copyright notice,
11 * this list of conditions and the following disclaimer in the documentation
12 * and/or other materials provided with the distribution.
13 *
14 * 3.  Neither the name of the copyright holder nor the names of its
15 * contributors may be used to endorse or promote products derived from
16 * this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
21 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
24 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
25 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
26 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */
29use crate::alpha_check::has_non_constant_cap_alpha_rgba_f32;
30#[cfg(feature = "nightly_f16")]
31use crate::alpha_handle_f16::{premultiply_alpha_rgba_f16, unpremultiply_alpha_rgba_f16};
32use crate::alpha_handle_f32::{premultiply_alpha_rgba_f32, unpremultiply_alpha_rgba_f32};
33use crate::alpha_handle_u8::{premultiply_alpha_rgba, unpremultiply_alpha_rgba};
34use crate::alpha_handle_u16::{premultiply_alpha_rgba_u16, unpremultiply_alpha_rgba_u16};
35use crate::support::check_image_size_overflow;
36use crate::validation::{PicScaleBufferMismatch, PicScaleError, try_vec};
37use crate::{ImageSize, WorkloadStrategy};
38#[cfg(feature = "nightly_f16")]
39use core::f16;
40use std::fmt::Debug;
41
42/// Holds an image
43///
44/// # Arguments
45/// `N` - count of channels
46///
47/// # Examples
48/// ImageStore<u8, 4> - represents RGBA
49/// ImageStore<u8, 3> - represents RGB
50/// ImageStore<f32, 3> - represents RGB in f32 and etc
/// Holds an immutable image.
///
/// The pixel buffer is a [`std::borrow::Cow`], so a store may either borrow
/// caller-owned memory or own its own `Vec`.
///
/// # Arguments
/// `N` - count of channels
///
/// # Examples
/// ImageStore<u8, 4> - represents RGBA
/// ImageStore<u8, 3> - represents RGB
/// ImageStore<f32, 3> - represents RGB in f32 and etc
#[derive(Clone)]
pub struct ImageStore<'a, T, const N: usize>
where
    [T]: ToOwned<Owned = Vec<T>>,
{
    // Borrowed or owned pixel data, rows laid out according to `stride`.
    pub buffer: std::borrow::Cow<'a, [T]>,
    /// Channels in the image
    pub channels: usize,
    /// Image width
    pub width: usize,
    /// Image height
    pub height: usize,
    /// Image stride, if stride is zero then it considered to be `width * N`
    pub stride: usize,
    /// This is private field, currently used only for u16, will be automatically passed from upper func
    pub bit_depth: usize,
}
68
69/// Holds an image
70///
71/// # Arguments
72/// `N` - count of channels
73///
74/// # Examples
75/// ImageStore<u8, 4> - represents RGBA
76/// ImageStore<u8, 3> - represents RGB
77/// ImageStore<f32, 3> - represents RGB in f32 and etc
/// Holds a mutable image (the destination side of an operation).
///
/// # Arguments
/// `N` - count of channels
///
/// # Examples
/// ImageStore<u8, 4> - represents RGBA
/// ImageStore<u8, 3> - represents RGB
/// ImageStore<f32, 3> - represents RGB in f32 and etc
pub struct ImageStoreMut<'a, T, const N: usize> {
    // Borrowed-mutable or owned pixel data; see `BufferStore`.
    pub buffer: BufferStore<'a, T>,
    /// Channels in the image
    pub channels: usize,
    /// Image width
    pub width: usize,
    /// Image height
    pub height: usize,
    /// Image stride, if stride is zero then it considered to be `width * N`
    pub stride: usize,
    /// Required for `u16` images
    pub bit_depth: usize,
}
91
/// Internal check of whether a store's element type carries a meaningful
/// `bit_depth` that callers must supply (true for `u16`/`i16` stores below).
pub(crate) trait CheckStoreDensity {
    // Returns `true` when a non-zero `bit_depth` is expected on the store.
    fn should_have_bit_depth(&self) -> bool;
}
95
/// Structure for mutable target buffer
pub enum BufferStore<'a, T> {
    /// Mutable view into caller-owned memory.
    Borrowed(&'a mut [T]),
    /// Heap storage owned by the store itself.
    Owned(Vec<T>),
}
101
102impl<T> BufferStore<'_, T> {
103    #[allow(clippy::should_implement_trait)]
104    /// Borrowing immutable slice
105    pub fn borrow(&self) -> &[T] {
106        match self {
107            Self::Borrowed(p_ref) => p_ref,
108            Self::Owned(vec) => vec,
109        }
110    }
111
112    #[allow(clippy::should_implement_trait)]
113    /// Borrowing mutable slice
114    pub fn borrow_mut(&mut self) -> &mut [T] {
115        match self {
116            Self::Borrowed(p_ref) => p_ref,
117            Self::Owned(vec) => vec,
118        }
119    }
120}
121
122impl<'a, T, const N: usize> ImageStore<'a, T, N>
123where
124    T: Clone + Copy + Debug + Default,
125{
126    /// Creates new store
127    pub fn new(
128        slice_ref: Vec<T>,
129        width: usize,
130        height: usize,
131    ) -> Result<ImageStore<'a, T, N>, PicScaleError> {
132        let expected_size = width * height * N;
133        if slice_ref.len() != width * height * N {
134            return Err(PicScaleError::BufferMismatch(PicScaleBufferMismatch {
135                expected: expected_size,
136                width,
137                height,
138                channels: N,
139                slice_len: slice_ref.len(),
140            }));
141        }
142        Ok(ImageStore::<T, N> {
143            buffer: std::borrow::Cow::Owned(slice_ref),
144            channels: N,
145            width,
146            height,
147            stride: width * N,
148            bit_depth: 0,
149        })
150    }
151
152    /// Borrows immutable slice as new image store
153    pub fn borrow(
154        slice_ref: &'a [T],
155        width: usize,
156        height: usize,
157    ) -> Result<ImageStore<'a, T, N>, PicScaleError> {
158        let expected_size = width * height * N;
159        if slice_ref.len() != width * height * N {
160            return Err(PicScaleError::BufferMismatch(PicScaleBufferMismatch {
161                expected: expected_size,
162                width,
163                height,
164                channels: N,
165                slice_len: slice_ref.len(),
166            }));
167        }
168        Ok(ImageStore::<T, N> {
169            buffer: std::borrow::Cow::Borrowed(slice_ref),
170            channels: N,
171            width,
172            height,
173            stride: width * N,
174            bit_depth: 0,
175        })
176    }
177
178    /// Allocates new owned image store
179    pub fn alloc(width: usize, height: usize) -> ImageStore<'a, T, N> {
180        let vc = vec![T::default(); width * N * height];
181        ImageStore::<T, N> {
182            buffer: std::borrow::Cow::Owned(vc),
183            channels: N,
184            width,
185            height,
186            stride: width * N,
187            bit_depth: 0,
188        }
189    }
190}
191
// 8-bit stores use the full u8 range directly; no bit depth needed.
impl<const N: usize> CheckStoreDensity for ImageStoreMut<'_, u8, N> {
    fn should_have_bit_depth(&self) -> bool {
        false
    }
}

// f32 stores are normalized floats; bit depth is irrelevant.
impl<const N: usize> CheckStoreDensity for ImageStoreMut<'_, f32, N> {
    fn should_have_bit_depth(&self) -> bool {
        false
    }
}

// f16 stores are floats as well; bit depth is irrelevant.
#[cfg(feature = "nightly_f16")]
impl<const N: usize> CheckStoreDensity for ImageStoreMut<'_, f16, N> {
    fn should_have_bit_depth(&self) -> bool {
        false
    }
}

// u16 stores may carry 9..=16 significant bits; callers must set bit_depth.
impl<const N: usize> CheckStoreDensity for ImageStoreMut<'_, u16, N> {
    fn should_have_bit_depth(&self) -> bool {
        true
    }
}

// i16 stores likewise require an explicit bit depth.
impl<const N: usize> CheckStoreDensity for ImageStoreMut<'_, i16, N> {
    fn should_have_bit_depth(&self) -> bool {
        true
    }
}
222
223impl<T, const N: usize> ImageStoreMut<'_, T, N> {
224    pub(crate) fn validate(&self) -> Result<(), PicScaleError> {
225        let expected_size = self.stride() * self.height;
226        if self.width == 0 || self.height == 0 {
227            return Err(PicScaleError::ZeroImageDimensions);
228        }
229        if self.buffer.borrow().len() != self.stride() * self.height {
230            return Err(PicScaleError::BufferMismatch(PicScaleBufferMismatch {
231                expected: expected_size,
232                width: self.width,
233                height: self.height,
234                channels: N,
235                slice_len: self.buffer.borrow().len(),
236            }));
237        }
238        if self.stride < self.width * N {
239            return Err(PicScaleError::InvalidStride(self.width * N, self.stride));
240        }
241        if check_image_size_overflow(self.width, self.height, self.channels) {
242            return Err(PicScaleError::SourceImageIsTooLarge);
243        }
244        Ok(())
245    }
246}
247
248impl<T, const N: usize> ImageStore<'_, T, N>
249where
250    [T]: ToOwned<Owned = Vec<T>>,
251{
252    pub(crate) fn validate(&self) -> Result<(), PicScaleError> {
253        let expected_size = self.stride() * self.height;
254        if self.width == 0 || self.height == 0 {
255            return Err(PicScaleError::ZeroImageDimensions);
256        }
257        if self.buffer.as_ref().len() != self.stride() * self.height {
258            return Err(PicScaleError::BufferMismatch(PicScaleBufferMismatch {
259                expected: expected_size,
260                width: self.width,
261                height: self.height,
262                channels: N,
263                slice_len: self.buffer.as_ref().len(),
264            }));
265        }
266        if self.stride < self.width * N {
267            return Err(PicScaleError::InvalidStride(self.width * N, self.stride));
268        }
269        if check_image_size_overflow(self.width, self.height, self.channels) {
270            return Err(PicScaleError::DestinationImageIsTooLarge);
271        }
272
273        Ok(())
274    }
275}
276
277impl<'a, T, const N: usize> ImageStoreMut<'a, T, N>
278where
279    T: Clone + Copy + Debug + Default,
280{
281    /// Creates new mutable storage from vector
282    ///
283    /// Always sets bit-depth to `0`
284    pub fn new(
285        slice_ref: Vec<T>,
286        width: usize,
287        height: usize,
288    ) -> Result<ImageStoreMut<'a, T, N>, PicScaleError> {
289        let expected_size = width * height * N;
290        if slice_ref.len() != width * height * N {
291            return Err(PicScaleError::BufferMismatch(PicScaleBufferMismatch {
292                expected: expected_size,
293                width,
294                height,
295                channels: N,
296                slice_len: slice_ref.len(),
297            }));
298        }
299        Ok(ImageStoreMut::<T, N> {
300            buffer: BufferStore::Owned(slice_ref),
301            channels: N,
302            width,
303            height,
304            stride: width * N,
305            bit_depth: 0,
306        })
307    }
308
309    /// Creates new mutable storage from slice
310    ///
311    /// Always sets bit-depth to `0`
312    pub fn borrow(
313        slice_ref: &'a mut [T],
314        width: usize,
315        height: usize,
316    ) -> Result<ImageStoreMut<'a, T, N>, PicScaleError> {
317        let expected_size = width * height * N;
318        if slice_ref.len() != width * height * N {
319            return Err(PicScaleError::BufferMismatch(PicScaleBufferMismatch {
320                expected: expected_size,
321                width,
322                height,
323                channels: N,
324                slice_len: slice_ref.len(),
325            }));
326        }
327        Ok(ImageStoreMut::<T, N> {
328            buffer: BufferStore::Borrowed(slice_ref),
329            channels: N,
330            width,
331            height,
332            stride: width * N,
333            bit_depth: 0,
334        })
335    }
336
337    /// Allocates new mutable image storage
338    ///
339    /// Always sets bit depth to `0`
340    pub fn alloc(width: usize, height: usize) -> ImageStoreMut<'a, T, N> {
341        let vc = vec![T::default(); width * N * height];
342        ImageStoreMut::<T, N> {
343            buffer: BufferStore::Owned(vc),
344            channels: N,
345            width,
346            height,
347            stride: width * N,
348            bit_depth: 0,
349        }
350    }
351
352    /// Tries to allocate new mutable image storage
353    ///
354    /// Always sets bit depth to `0`
355    pub fn try_alloc(
356        width: usize,
357        height: usize,
358    ) -> Result<ImageStoreMut<'a, T, N>, PicScaleError> {
359        let vc = try_vec![T::default(); width * N * height];
360        Ok(ImageStoreMut::<T, N> {
361            buffer: BufferStore::Owned(vc),
362            channels: N,
363            width,
364            height,
365            stride: width * N,
366            bit_depth: 0,
367        })
368    }
369
370    /// Allocates new mutable image storage with required bit-depth
371    pub fn alloc_with_depth(
372        width: usize,
373        height: usize,
374        bit_depth: usize,
375    ) -> ImageStoreMut<'a, T, N> {
376        let vc = vec![T::default(); width * N * height];
377        ImageStoreMut::<T, N> {
378            buffer: BufferStore::Owned(vc),
379            channels: N,
380            width,
381            height,
382            stride: width * N,
383            bit_depth,
384        }
385    }
386
387    /// Tries to allocate new mutable image storage with required bit-depth
388    pub fn try_alloc_with_depth(
389        width: usize,
390        height: usize,
391        bit_depth: usize,
392    ) -> Result<ImageStoreMut<'a, T, N>, PicScaleError> {
393        let vc = try_vec![T::default(); width * N * height];
394        Ok(ImageStoreMut::<T, N> {
395            buffer: BufferStore::Owned(vc),
396            channels: N,
397            width,
398            height,
399            stride: width * N,
400            bit_depth,
401        })
402    }
403}
404
405impl<T, const N: usize> ImageStoreMut<'_, T, N> {
406    /// Returns safe stride
407    ///
408    /// If stride set to 0 then returns `width * N`
409    #[inline]
410    pub fn stride(&self) -> usize {
411        if self.stride == 0 {
412            return self.width * N;
413        }
414        self.stride
415    }
416}
417
418impl<T, const N: usize> ImageStore<'_, T, N>
419where
420    [T]: ToOwned<Owned = Vec<T>>,
421{
422    /// Returns safe stride
423    ///
424    /// If stride set to 0 then returns `width * N`
425    #[inline]
426    pub fn stride(&self) -> usize {
427        if self.stride == 0 {
428            return self.width * N;
429        }
430        self.stride
431    }
432}
433
434impl<'a, T, const N: usize> ImageStore<'a, T, N>
435where
436    T: Clone + Copy + Debug,
437{
438    /// Returns bounded image size
439    pub fn size(&self) -> ImageSize {
440        ImageSize::new(self.width, self.height)
441    }
442
443    /// Returns current image store as immutable slice
444    pub fn as_bytes(&self) -> &[T] {
445        match &self.buffer {
446            std::borrow::Cow::Borrowed(br) => br,
447            std::borrow::Cow::Owned(v) => v.as_ref(),
448        }
449    }
450
451    /// Borrows immutable slice int oa new image store
452    pub fn from_slice(
453        slice_ref: &'a [T],
454        width: usize,
455        height: usize,
456    ) -> Result<ImageStore<'a, T, N>, PicScaleError> {
457        let expected_size = width * height * N;
458        if slice_ref.len() != width * height * N {
459            return Err(PicScaleError::BufferMismatch(PicScaleBufferMismatch {
460                expected: expected_size,
461                width,
462                height,
463                channels: N,
464                slice_len: slice_ref.len(),
465            }));
466        }
467        Ok(ImageStore::<T, N> {
468            buffer: std::borrow::Cow::Borrowed(slice_ref),
469            channels: N,
470            width,
471            height,
472            stride: width * N,
473            bit_depth: 0,
474        })
475    }
476
477    /// Deep copy immutable image store into a new immutable store
478    pub fn copied<'b>(&self) -> ImageStore<'b, T, N> {
479        ImageStore::<T, N> {
480            buffer: std::borrow::Cow::Owned(self.buffer.as_ref().to_vec()),
481            channels: N,
482            width: self.width,
483            height: self.height,
484            stride: self.width * N,
485            bit_depth: self.bit_depth,
486        }
487    }
488
489    /// Deep copy immutable image into mutable
490    pub fn copied_to_mut(&self, into: &mut ImageStoreMut<T, N>) {
491        let into_stride = into.stride();
492        for (src_row, dst_row) in self
493            .buffer
494            .as_ref()
495            .chunks_exact(self.stride())
496            .zip(into.buffer.borrow_mut().chunks_exact_mut(into_stride))
497        {
498            for (&src, dst) in src_row.iter().zip(dst_row.iter_mut()) {
499                *dst = src;
500            }
501        }
502    }
503}
504
505impl<'a, T, const N: usize> ImageStoreMut<'a, T, N> {
506    /// Returns bounded image size
507    pub fn size(&self) -> ImageSize {
508        ImageSize::new(self.width, self.height)
509    }
510
511    /// Returns current image as immutable slice
512    pub fn as_bytes(&self) -> &[T] {
513        match &self.buffer {
514            BufferStore::Borrowed(p) => p,
515            BufferStore::Owned(v) => v,
516        }
517    }
518
519    /// Borrows mutable slice as new image store
520    pub fn from_slice(
521        slice_ref: &'a mut [T],
522        width: usize,
523        height: usize,
524    ) -> Result<ImageStoreMut<'a, T, N>, PicScaleError> {
525        let expected_size = width * height * N;
526        if slice_ref.len() != width * height * N {
527            return Err(PicScaleError::BufferMismatch(PicScaleBufferMismatch {
528                expected: expected_size,
529                width,
530                height,
531                channels: N,
532                slice_len: slice_ref.len(),
533            }));
534        }
535        Ok(ImageStoreMut::<T, N> {
536            buffer: BufferStore::Borrowed(slice_ref),
537            channels: N,
538            width,
539            height,
540            stride: width * N,
541            bit_depth: 0,
542        })
543    }
544}
545
546impl<'a, T, const N: usize> ImageStoreMut<'a, T, N>
547where
548    T: Clone,
549{
550    /// Performs deep copy into a new mutable image
551    pub fn copied<'b>(&self) -> ImageStoreMut<'b, T, N> {
552        ImageStoreMut::<T, N> {
553            buffer: BufferStore::Owned(self.buffer.borrow().to_vec()),
554            channels: N,
555            width: self.width,
556            height: self.height,
557            stride: self.width * N,
558            bit_depth: self.bit_depth,
559        }
560    }
561
562    /// Performs deep copy into a new immutable image
563    pub fn to_immutable(&self) -> ImageStore<'_, T, N> {
564        ImageStore::<T, N> {
565            buffer: std::borrow::Cow::Borrowed(self.buffer.borrow()),
566            channels: N,
567            width: self.width,
568            height: self.height,
569            stride: self.width * N,
570            bit_depth: self.bit_depth,
571        }
572    }
573}
574
/// Internal trait for alpha premultiplication of a source store into a
/// mutable destination of the same format.
pub(crate) trait AssociateAlpha<T: Clone + Copy + Debug, const N: usize> {
    // Writes the premultiplied pixels of `self` into `into` using `pool`.
    fn premultiply_alpha(&self, into: &mut ImageStoreMut<'_, T, N>, pool: &novtb::ThreadPool);
    // Cheap scan deciding whether premultiplication can be skipped entirely.
    fn is_alpha_premultiplication_needed(&self) -> bool;
}
579
/// Internal trait reversing alpha premultiplication in place on a mutable store.
pub(crate) trait UnassociateAlpha<T: Clone + Copy + Debug, const N: usize> {
    // In-place unpremultiply; some implementations ignore `workload_strategy`.
    fn unpremultiply_alpha(
        &mut self,
        pool: &novtb::ThreadPool,
        workload_strategy: WorkloadStrategy,
    );
}
587
impl AssociateAlpha<u8, 2> for ImageStore<'_, u8, 2> {
    // Premultiplies an 8-bit gray+alpha image from `self` into `into`.
    fn premultiply_alpha(&self, into: &mut ImageStoreMut<'_, u8, 2>, pool: &novtb::ThreadPool) {
        let dst_stride = into.stride();
        let dst = into.buffer.borrow_mut();
        let src = self.buffer.as_ref();
        use crate::alpha_handle_u8::premultiply_alpha_gray_alpha;
        premultiply_alpha_gray_alpha(
            dst,
            dst_stride,
            src,
            self.width,
            self.height,
            self.stride(),
            pool,
        );
    }

    // Scans the alpha plane; presumably returns true when alpha is not
    // uniformly at cap, i.e. premultiplication would change pixels.
    fn is_alpha_premultiplication_needed(&self) -> bool {
        use crate::alpha_check::has_non_constant_cap_alpha_gray_alpha8;
        has_non_constant_cap_alpha_gray_alpha8(self.buffer.as_ref(), self.width, self.stride())
    }
}
610
impl AssociateAlpha<u16, 2> for ImageStore<'_, u16, 2> {
    // Premultiplies a 16-bit gray+alpha image; the destination's bit depth
    // scales the alpha cap value.
    fn premultiply_alpha(&self, into: &mut ImageStoreMut<'_, u16, 2>, pool: &novtb::ThreadPool) {
        let dst_stride = into.stride();
        let dst = into.buffer.borrow_mut();
        let src = self.buffer.as_ref();
        use crate::alpha_handle_u16::premultiply_alpha_gray_alpha_u16;
        premultiply_alpha_gray_alpha_u16(
            dst,
            dst_stride,
            src,
            self.width,
            self.height,
            self.stride(),
            into.bit_depth,
            pool,
        );
    }

    // Scans the alpha plane; presumably true when premultiplication is non-trivial.
    fn is_alpha_premultiplication_needed(&self) -> bool {
        use crate::alpha_check::has_non_constant_cap_alpha_gray_alpha16;
        has_non_constant_cap_alpha_gray_alpha16(self.buffer.as_ref(), self.width, self.stride())
    }
}
634
impl AssociateAlpha<f32, 2> for ImageStore<'_, f32, 2> {
    // Premultiplies an f32 gray+alpha image from `self` into `into`.
    // NOTE(review): the argument order here is (dst, dst_stride, src,
    // src_stride, width, height) — unlike the u8/u16 overloads above which
    // pass (…, width, height, src_stride). Confirm against the signature of
    // `premultiply_alpha_gray_alpha_f32`.
    fn premultiply_alpha(&self, into: &mut ImageStoreMut<'_, f32, 2>, pool: &novtb::ThreadPool) {
        let dst_stride = into.stride();
        let dst = into.buffer.borrow_mut();
        let src = self.buffer.as_ref();
        use crate::alpha_handle_f32::premultiply_alpha_gray_alpha_f32;
        premultiply_alpha_gray_alpha_f32(
            dst,
            dst_stride,
            src,
            self.stride(),
            self.width,
            self.height,
            pool,
        );
    }

    // Scans the alpha plane; presumably true when premultiplication is non-trivial.
    fn is_alpha_premultiplication_needed(&self) -> bool {
        use crate::alpha_check::has_non_constant_cap_alpha_gray_alpha_f32;
        has_non_constant_cap_alpha_gray_alpha_f32(self.buffer.as_ref(), self.width, self.stride())
    }
}
657
impl AssociateAlpha<u8, 4> for ImageStore<'_, u8, 4> {
    // Premultiplies an RGBA8 image from `self` into `into`.
    fn premultiply_alpha(&self, into: &mut ImageStoreMut<'_, u8, 4>, pool: &novtb::ThreadPool) {
        let dst_stride = into.stride();
        let dst = into.buffer.borrow_mut();
        let src = self.buffer.as_ref();
        premultiply_alpha_rgba(
            dst,
            dst_stride,
            src,
            self.width,
            self.height,
            self.stride(),
            pool,
        );
    }

    // Portable scalar fallback when neither x86 nor aarch64+neon is available.
    #[cfg(not(any(
        any(target_arch = "x86_64", target_arch = "x86"),
        all(target_arch = "aarch64", feature = "neon")
    )))]
    fn is_alpha_premultiplication_needed(&self) -> bool {
        use crate::alpha_check::has_non_constant_cap_alpha_rgba8;
        has_non_constant_cap_alpha_rgba8(self.buffer.as_ref(), self.width, self.stride())
    }

    // NEON-accelerated alpha scan on aarch64.
    #[cfg(all(target_arch = "aarch64", feature = "neon"))]
    fn is_alpha_premultiplication_needed(&self) -> bool {
        use crate::neon::neon_has_non_constant_cap_alpha_rgba8;
        neon_has_non_constant_cap_alpha_rgba8(self.buffer.as_ref(), self.width, self.stride())
    }

    // x86 runtime dispatch: prefer AVX-512BW, then AVX2, then SSE4.1,
    // finally the scalar check.
    #[cfg(any(target_arch = "x86_64", target_arch = "x86"))]
    fn is_alpha_premultiplication_needed(&self) -> bool {
        use crate::alpha_check::has_non_constant_cap_alpha_rgba8;
        #[cfg(feature = "sse")]
        use crate::sse::sse_has_non_constant_cap_alpha_rgba8;
        #[cfg(all(target_arch = "x86_64", feature = "avx512"))]
        if std::arch::is_x86_feature_detected!("avx512bw") {
            use crate::avx512::avx512_has_non_constant_cap_alpha_rgba8;
            return avx512_has_non_constant_cap_alpha_rgba8(
                self.buffer.as_ref(),
                self.width,
                self.stride(),
            );
        }
        #[cfg(all(target_arch = "x86_64", feature = "avx"))]
        if std::arch::is_x86_feature_detected!("avx2") {
            use crate::avx2::avx_has_non_constant_cap_alpha_rgba8;
            return avx_has_non_constant_cap_alpha_rgba8(
                self.buffer.as_ref(),
                self.width,
                self.stride(),
            );
        }
        #[cfg(feature = "sse")]
        if std::arch::is_x86_feature_detected!("sse4.1") {
            return sse_has_non_constant_cap_alpha_rgba8(
                self.buffer.as_ref(),
                self.width,
                self.stride(),
            );
        }
        has_non_constant_cap_alpha_rgba8(self.buffer.as_ref(), self.width, self.stride())
    }
}
723
724impl UnassociateAlpha<u8, 4> for ImageStoreMut<'_, u8, 4> {
725    fn unpremultiply_alpha(
726        &mut self,
727        pool: &novtb::ThreadPool,
728        workload_strategy: WorkloadStrategy,
729    ) {
730        let src_stride = self.stride();
731        let dst = self.buffer.borrow_mut();
732        unpremultiply_alpha_rgba(
733            dst,
734            self.width,
735            self.height,
736            src_stride,
737            pool,
738            workload_strategy,
739        );
740    }
741}
742
impl UnassociateAlpha<u8, 2> for ImageStoreMut<'_, u8, 2> {
    // In-place unpremultiply for an 8-bit gray+alpha image.
    fn unpremultiply_alpha(
        &mut self,
        pool: &novtb::ThreadPool,
        workload_strategy: WorkloadStrategy,
    ) {
        let src_stride = self.stride();
        let dst = self.buffer.borrow_mut();
        use crate::alpha_handle_u8::unpremultiply_alpha_gray_alpha;
        unpremultiply_alpha_gray_alpha(
            dst,
            self.width,
            self.height,
            src_stride,
            pool,
            workload_strategy,
        );
    }
}
762
763impl UnassociateAlpha<f32, 2> for ImageStoreMut<'_, f32, 2> {
764    fn unpremultiply_alpha(&mut self, pool: &novtb::ThreadPool, _: WorkloadStrategy) {
765        let src_stride = self.stride();
766        let dst = self.buffer.borrow_mut();
767        use crate::alpha_handle_f32::unpremultiply_alpha_gray_alpha_f32;
768        unpremultiply_alpha_gray_alpha_f32(dst, src_stride, self.width, self.height, pool);
769    }
770}
771
impl UnassociateAlpha<u16, 2> for ImageStoreMut<'_, u16, 2> {
    // In-place unpremultiply for a 16-bit gray+alpha image; `bit_depth`
    // determines the alpha cap. Workload strategy unused on this path.
    fn unpremultiply_alpha(&mut self, pool: &novtb::ThreadPool, _: WorkloadStrategy) {
        let src_stride = self.stride();
        let dst = self.buffer.borrow_mut();
        use crate::alpha_handle_u16::unpremultiply_alpha_gray_alpha_u16;
        unpremultiply_alpha_gray_alpha_u16(
            dst,
            src_stride,
            self.width,
            self.height,
            self.bit_depth,
            pool,
        );
    }
}
787
impl AssociateAlpha<u16, 4> for ImageStore<'_, u16, 4> {
    // Premultiplies an RGBA16 image; the destination's bit depth scales the
    // alpha cap value.
    fn premultiply_alpha(&self, into: &mut ImageStoreMut<'_, u16, 4>, pool: &novtb::ThreadPool) {
        let dst_stride = into.stride();
        let dst = into.buffer.borrow_mut();
        let src = self.buffer.as_ref();
        premultiply_alpha_rgba_u16(
            dst,
            dst_stride,
            src,
            self.width,
            self.height,
            self.stride(),
            into.bit_depth,
            pool,
        );
    }

    // Portable scalar fallback when neither x86 nor aarch64+neon is available.
    #[cfg(not(any(
        any(target_arch = "x86_64", target_arch = "x86"),
        all(target_arch = "aarch64", feature = "neon")
    )))]
    fn is_alpha_premultiplication_needed(&self) -> bool {
        use crate::alpha_check::has_non_constant_cap_alpha_rgba16;
        has_non_constant_cap_alpha_rgba16(self.buffer.as_ref(), self.width, self.stride())
    }

    // NEON-accelerated alpha scan on aarch64.
    #[cfg(all(target_arch = "aarch64", feature = "neon"))]
    fn is_alpha_premultiplication_needed(&self) -> bool {
        use crate::neon::neon_has_non_constant_cap_alpha_rgba16;
        neon_has_non_constant_cap_alpha_rgba16(self.buffer.as_ref(), self.width, self.stride())
    }

    // x86 runtime dispatch: AVX2, then SSE4.1, then the scalar check.
    #[cfg(any(target_arch = "x86_64", target_arch = "x86"))]
    fn is_alpha_premultiplication_needed(&self) -> bool {
        use crate::alpha_check::has_non_constant_cap_alpha_rgba16;
        #[cfg(feature = "sse")]
        use crate::sse::sse_has_non_constant_cap_alpha_rgba16;
        #[cfg(all(target_arch = "x86_64", feature = "avx"))]
        if std::arch::is_x86_feature_detected!("avx2") {
            use crate::avx2::avx_has_non_constant_cap_alpha_rgba16;
            return avx_has_non_constant_cap_alpha_rgba16(
                self.buffer.as_ref(),
                self.width,
                self.stride(),
            );
        }
        #[cfg(feature = "sse")]
        if std::arch::is_x86_feature_detected!("sse4.1") {
            return sse_has_non_constant_cap_alpha_rgba16(
                self.buffer.as_ref(),
                self.width,
                self.stride(),
            );
        }
        has_non_constant_cap_alpha_rgba16(self.buffer.as_ref(), self.width, self.stride())
    }
}
845
impl AssociateAlpha<f32, 4> for ImageStore<'_, f32, 4> {
    // Premultiplies an RGBA f32 image from `self` into `into`.
    fn premultiply_alpha(&self, into: &mut ImageStoreMut<'_, f32, 4>, pool: &novtb::ThreadPool) {
        let src_stride = self.stride();
        let dst_stride = into.stride();
        let dst = into.buffer.borrow_mut();
        let src = self.buffer.as_ref();
        premultiply_alpha_rgba_f32(
            dst,
            dst_stride,
            src,
            src_stride,
            self.width,
            self.height,
            pool,
        );
    }

    // Scans the alpha plane; presumably true when premultiplication is non-trivial.
    fn is_alpha_premultiplication_needed(&self) -> bool {
        has_non_constant_cap_alpha_rgba_f32(self.buffer.as_ref(), self.width, self.stride())
    }
}
867
#[cfg(feature = "nightly_f16")]
impl AssociateAlpha<f16, 4> for ImageStore<'_, f16, 4> {
    // Premultiplies an RGBA f16 image from `self` into `into`.
    fn premultiply_alpha(&self, into: &mut ImageStoreMut<'_, f16, 4>, pool: &novtb::ThreadPool) {
        let src_stride = self.stride();
        let dst_stride = into.stride();
        let dst = into.buffer.borrow_mut();
        let src = self.buffer.as_ref();
        premultiply_alpha_rgba_f16(
            dst,
            dst_stride,
            src,
            src_stride,
            self.width,
            self.height,
            pool,
        );
    }

    // No constant-alpha fast scan is implemented for f16, so the f16 path
    // always performs premultiplication.
    fn is_alpha_premultiplication_needed(&self) -> bool {
        true
    }
}
890
impl UnassociateAlpha<u16, 4> for ImageStoreMut<'_, u16, 4> {
    // In-place unpremultiply for RGBA16; `bit_depth` determines the alpha cap.
    // Workload strategy unused on this path.
    fn unpremultiply_alpha(&mut self, pool: &novtb::ThreadPool, _: WorkloadStrategy) {
        let src_stride = self.stride();
        let in_place = self.buffer.borrow_mut();
        unpremultiply_alpha_rgba_u16(
            in_place,
            src_stride,
            self.width,
            self.height,
            self.bit_depth,
            pool,
        );
    }
}
905
906impl UnassociateAlpha<f32, 4> for ImageStoreMut<'_, f32, 4> {
907    fn unpremultiply_alpha(&mut self, pool: &novtb::ThreadPool, _: WorkloadStrategy) {
908        let stride = self.stride();
909        let dst = self.buffer.borrow_mut();
910        unpremultiply_alpha_rgba_f32(dst, stride, self.width, self.height, pool);
911    }
912}
913
#[cfg(feature = "nightly_f16")]
impl UnassociateAlpha<f16, 4> for ImageStoreMut<'_, f16, 4> {
    // In-place unpremultiply for RGBA f16. Workload strategy unused.
    fn unpremultiply_alpha(&mut self, pool: &novtb::ThreadPool, _: WorkloadStrategy) {
        let stride = self.stride();
        let dst = self.buffer.borrow_mut();
        unpremultiply_alpha_rgba_f16(dst, stride, self.width, self.height, pool);
    }
}
922
// ---- 8-bit (u8) stores ----
pub type Planar8ImageStore<'a> = ImageStore<'a, u8, 1>;
pub type Planar8ImageStoreMut<'a> = ImageStoreMut<'a, u8, 1>;
pub type CbCr8ImageStore<'a> = ImageStore<'a, u8, 2>;
pub type CbCr8ImageStoreMut<'a> = ImageStoreMut<'a, u8, 2>;
pub type GrayAlpha8ImageStore<'a> = ImageStore<'a, u8, 2>;
pub type GrayAlpha8ImageStoreMut<'a> = ImageStoreMut<'a, u8, 2>;
pub type Rgba8ImageStore<'a> = ImageStore<'a, u8, 4>;
pub type Rgba8ImageStoreMut<'a> = ImageStoreMut<'a, u8, 4>;
pub type Rgb8ImageStore<'a> = ImageStore<'a, u8, 3>;
pub type Rgb8ImageStoreMut<'a> = ImageStoreMut<'a, u8, 3>;

// ---- 16-bit integer (u16 / i16) stores; require an explicit bit depth ----
pub type PlanarS16ImageStore<'a> = ImageStore<'a, i16, 1>;
pub type PlanarS16ImageStoreMut<'a> = ImageStoreMut<'a, i16, 1>;
pub type Planar16ImageStore<'a> = ImageStore<'a, u16, 1>;
pub type Planar16ImageStoreMut<'a> = ImageStoreMut<'a, u16, 1>;
pub type CbCr16ImageStore<'a> = ImageStore<'a, u16, 2>;
pub type CbCr16ImageStoreMut<'a> = ImageStoreMut<'a, u16, 2>;
pub type GrayAlpha16ImageStore<'a> = ImageStore<'a, u16, 2>;
pub type GrayAlpha16ImageStoreMut<'a> = ImageStoreMut<'a, u16, 2>;
pub type Rgba16ImageStore<'a> = ImageStore<'a, u16, 4>;
pub type Rgba16ImageStoreMut<'a> = ImageStoreMut<'a, u16, 4>;
pub type Rgb16ImageStore<'a> = ImageStore<'a, u16, 3>;
pub type Rgb16ImageStoreMut<'a> = ImageStoreMut<'a, u16, 3>;

// ---- f16 stores (nightly-only feature) ----
#[cfg(feature = "nightly_f16")]
pub type PlanarF16ImageStore<'a> = ImageStore<'a, f16, 1>;
#[cfg(feature = "nightly_f16")]
pub type PlanarF16ImageStoreMut<'a> = ImageStoreMut<'a, f16, 1>;
#[cfg(feature = "nightly_f16")]
pub type CbCrF16ImageStore<'a> = ImageStore<'a, f16, 2>;
#[cfg(feature = "nightly_f16")]
pub type CbCrF16ImageStoreMut<'a> = ImageStoreMut<'a, f16, 2>;
#[cfg(feature = "nightly_f16")]
pub type RgbaF16ImageStore<'a> = ImageStore<'a, f16, 4>;
#[cfg(feature = "nightly_f16")]
pub type RgbaF16ImageStoreMut<'a> = ImageStoreMut<'a, f16, 4>;
#[cfg(feature = "nightly_f16")]
pub type RgbF16ImageStore<'a> = ImageStore<'a, f16, 3>;
#[cfg(feature = "nightly_f16")]
pub type RgbF16ImageStoreMut<'a> = ImageStoreMut<'a, f16, 3>;

// ---- f32 stores ----
pub type PlanarF32ImageStore<'a> = ImageStore<'a, f32, 1>;
pub type PlanarF32ImageStoreMut<'a> = ImageStoreMut<'a, f32, 1>;
pub type CbCrF32ImageStore<'a> = ImageStore<'a, f32, 2>;
pub type CbCrF32ImageStoreMut<'a> = ImageStoreMut<'a, f32, 2>;
pub type GrayAlphaF32ImageStore<'a> = ImageStore<'a, f32, 2>;
pub type GrayAlphaF32ImageStoreMut<'a> = ImageStoreMut<'a, f32, 2>;
pub type RgbaF32ImageStore<'a> = ImageStore<'a, f32, 4>;
pub type RgbaF32ImageStoreMut<'a> = ImageStoreMut<'a, f32, 4>;
pub type RgbF32ImageStore<'a> = ImageStore<'a, f32, 3>;
pub type RgbF32ImageStoreMut<'a> = ImageStoreMut<'a, f32, 3>;
974
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn image_store_alpha_test_rgba8() {
        // A single non-opaque alpha byte must trigger premultiplication.
        let image_size = 256usize;
        let mut image = vec![0u8; image_size * image_size * 4];
        image[3 + 150 * 4] = 75;
        let store = ImageStore::<u8, 4>::from_slice(&image, image_size, image_size).unwrap();
        // `assert!` instead of `assert_eq!(true, ..)` (clippy: bool_assert_comparison).
        assert!(store.is_alpha_premultiplication_needed());
    }

    #[test]
    fn check_alpha_not_exists_rgba8() {
        // A fully opaque image must skip premultiplication.
        let image_size = 256usize;
        let image = vec![255u8; image_size * image_size * 4];
        let store = ImageStore::<u8, 4>::from_slice(&image, image_size, image_size).unwrap();
        assert!(!store.is_alpha_premultiplication_needed());
    }
}