pic_scale/image_store.rs

/*
 * Copyright (c) Radzivon Bartoshyk. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted provided that the following conditions are met:
 *
 * 1.  Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * 2.  Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * 3.  Neither the name of the copyright holder nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
use crate::alpha_check::has_non_constant_cap_alpha_rgba_f32;
#[cfg(feature = "nightly_f16")]
use crate::alpha_handle_f16::{premultiply_alpha_rgba_f16, unpremultiply_alpha_rgba_f16};
use crate::alpha_handle_f32::{premultiply_alpha_rgba_f32, unpremultiply_alpha_rgba_f32};
use crate::alpha_handle_u8::{premultiply_alpha_rgba, unpremultiply_alpha_rgba};
use crate::alpha_handle_u16::{premultiply_alpha_rgba_u16, unpremultiply_alpha_rgba_u16};
use crate::pic_scale_error::{PicScaleBufferMismatch, PicScaleError, try_vec};
use crate::{ImageSize, WorkloadStrategy};
#[cfg(feature = "nightly_f16")]
use core::f16;
use std::fmt::Debug;

/// Holds an immutable (source) image
///
/// # Arguments
/// `N` - number of channels
///
/// # Examples
/// `ImageStore<u8, 4>` - represents RGBA
/// `ImageStore<u8, 3>` - represents RGB
/// `ImageStore<f32, 3>` - represents RGB stored as `f32`, and so on
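///
/// Below is a minimal usage sketch (illustrative only, assuming the crate is
/// imported as `pic_scale`); it wraps an existing RGBA8 buffer without copying:
///
/// ```ignore
/// use pic_scale::ImageStore;
///
/// // A 2x2 RGBA8 image: 2 * 2 * 4 = 16 bytes.
/// let pixels = vec![0u8; 2 * 2 * 4];
/// // Borrow the slice; the length is validated against width * height * N.
/// let store = ImageStore::<u8, 4>::borrow(&pixels, 2, 2).unwrap();
/// assert_eq!(store.stride(), 2 * 4);
/// ```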
#[derive(Debug, Clone)]
pub struct ImageStore<'a, T, const N: usize>
where
    T: Clone + Copy + Debug,
{
    pub buffer: std::borrow::Cow<'a, [T]>,
    /// Number of channels in the image
    pub channels: usize,
    /// Image width
    pub width: usize,
    /// Image height
    pub height: usize,
    /// Image stride; if the stride is zero it is considered to be `width * N`
    pub stride: usize,
    /// Bit depth of the image; currently used only for `u16` stores and
    /// is set automatically by the higher-level functions
    pub bit_depth: usize,
}

/// Holds a mutable (target) image
///
/// # Arguments
/// `N` - number of channels
///
/// # Examples
/// `ImageStoreMut<u8, 4>` - represents RGBA
/// `ImageStoreMut<u8, 3>` - represents RGB
/// `ImageStoreMut<f32, 3>` - represents RGB stored as `f32`, and so on
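///
/// A minimal sketch (illustrative only, assuming the crate is imported as
/// `pic_scale`); it allocates an owned destination, which is the usual way to
/// prepare a resize target, and also shows wrapping a caller-owned slice:
///
/// ```ignore
/// use pic_scale::ImageStoreMut;
///
/// // Allocate an owned 4x4 RGBA8 destination; stride becomes width * N.
/// let mut dst = ImageStoreMut::<u8, 4>::alloc(4, 4);
/// assert_eq!(dst.stride(), 4 * 4);
///
/// // Alternatively, wrap a caller-owned mutable slice.
/// let mut raw = vec![0u8; 4 * 4 * 4];
/// let borrowed = ImageStoreMut::<u8, 4>::borrow(&mut raw, 4, 4).unwrap();
/// ```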
#[derive(Debug)]
pub struct ImageStoreMut<'a, T, const N: usize>
where
    T: Clone + Copy + Debug,
{
    pub buffer: BufferStore<'a, T>,
    /// Number of channels in the image
    pub channels: usize,
    /// Image width
    pub width: usize,
    /// Image height
    pub height: usize,
    /// Image stride; if the stride is zero it is considered to be `width * N`
    pub stride: usize,
    /// Bit depth; required for `u16` images
    pub bit_depth: usize,
}

pub(crate) trait CheckStoreDensity {
    fn should_have_bit_depth(&self) -> bool;
}

/// Backing storage for a mutable target buffer, either borrowed or owned
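///
/// A short sketch of how the two variants behave (illustrative only, and it
/// assumes `BufferStore` is reachable as `pic_scale::BufferStore`):
///
/// ```ignore
/// use pic_scale::BufferStore;
///
/// // Owned storage keeps its own Vec.
/// let mut owned = BufferStore::Owned(vec![0u8; 16]);
/// owned.borrow_mut()[0] = 255;
///
/// // Borrowed storage wraps a caller-provided mutable slice.
/// let mut raw = [0u8; 16];
/// let borrowed = BufferStore::Borrowed(&mut raw);
/// assert_eq!(borrowed.borrow().len(), 16);
/// ```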
#[derive(Debug)]
pub enum BufferStore<'a, T: Copy + Debug> {
    Borrowed(&'a mut [T]),
    Owned(Vec<T>),
}

impl<T: Copy + Debug> BufferStore<'_, T> {
    #[allow(clippy::should_implement_trait)]
    /// Borrows the contents as an immutable slice
    pub fn borrow(&self) -> &[T] {
        match self {
            Self::Borrowed(p_ref) => p_ref,
            Self::Owned(vec) => vec,
        }
    }

    #[allow(clippy::should_implement_trait)]
    /// Borrows the contents as a mutable slice
    pub fn borrow_mut(&mut self) -> &mut [T] {
        match self {
            Self::Borrowed(p_ref) => p_ref,
            Self::Owned(vec) => vec,
        }
    }
}

impl<'a, T, const N: usize> ImageStore<'a, T, N>
where
    T: Clone + Copy + Debug + Default,
{
    /// Creates a new store from an owned vector
    pub fn new(
        slice_ref: Vec<T>,
        width: usize,
        height: usize,
    ) -> Result<ImageStore<'a, T, N>, PicScaleError> {
        let expected_size = width * height * N;
        if slice_ref.len() != width * height * N {
            return Err(PicScaleError::BufferMismatch(PicScaleBufferMismatch {
                expected: expected_size,
                width,
                height,
                channels: N,
                slice_len: slice_ref.len(),
            }));
        }
        Ok(ImageStore::<T, N> {
            buffer: std::borrow::Cow::Owned(slice_ref),
            channels: N,
            width,
            height,
            stride: width * N,
            bit_depth: 0,
        })
    }

    /// Borrows an immutable slice as a new image store
    pub fn borrow(
        slice_ref: &'a [T],
        width: usize,
        height: usize,
    ) -> Result<ImageStore<'a, T, N>, PicScaleError> {
        let expected_size = width * height * N;
        if slice_ref.len() != width * height * N {
            return Err(PicScaleError::BufferMismatch(PicScaleBufferMismatch {
                expected: expected_size,
                width,
                height,
                channels: N,
                slice_len: slice_ref.len(),
            }));
        }
        Ok(ImageStore::<T, N> {
            buffer: std::borrow::Cow::Borrowed(slice_ref),
            channels: N,
            width,
            height,
            stride: width * N,
            bit_depth: 0,
        })
    }

    /// Allocates a new owned image store
    pub fn alloc(width: usize, height: usize) -> ImageStore<'a, T, N> {
        let vc = vec![T::default(); width * N * height];
        ImageStore::<T, N> {
            buffer: std::borrow::Cow::Owned(vc),
            channels: N,
            width,
            height,
            stride: width * N,
            bit_depth: 0,
        }
    }
}

impl<const N: usize> CheckStoreDensity for ImageStoreMut<'_, u8, N> {
    fn should_have_bit_depth(&self) -> bool {
        false
    }
}

impl<const N: usize> CheckStoreDensity for ImageStoreMut<'_, f32, N> {
    fn should_have_bit_depth(&self) -> bool {
        false
    }
}

#[cfg(feature = "nightly_f16")]
impl<const N: usize> CheckStoreDensity for ImageStoreMut<'_, f16, N> {
    fn should_have_bit_depth(&self) -> bool {
        false
    }
}

impl<const N: usize> CheckStoreDensity for ImageStoreMut<'_, u16, N> {
    fn should_have_bit_depth(&self) -> bool {
        true
    }
}

impl<T, const N: usize> ImageStoreMut<'_, T, N>
where
    T: Clone + Copy + Debug + Default,
{
    pub(crate) fn validate(&self) -> Result<(), PicScaleError> {
        let expected_size = self.stride() * self.height;
        if self.buffer.borrow().len() != self.stride() * self.height {
            return Err(PicScaleError::BufferMismatch(PicScaleBufferMismatch {
                expected: expected_size,
                width: self.width,
                height: self.height,
                channels: N,
                slice_len: self.buffer.borrow().len(),
            }));
        }
        if self.stride < self.width * N {
            return Err(PicScaleError::InvalidStride(self.width * N, self.stride));
        }
        Ok(())
    }
}

impl<T, const N: usize> ImageStore<'_, T, N>
where
    T: Clone + Copy + Debug + Default,
{
    pub(crate) fn validate(&self) -> Result<(), PicScaleError> {
        let expected_size = self.stride() * self.height;
        if self.buffer.as_ref().len() != self.stride() * self.height {
            return Err(PicScaleError::BufferMismatch(PicScaleBufferMismatch {
                expected: expected_size,
                width: self.width,
                height: self.height,
                channels: N,
                slice_len: self.buffer.as_ref().len(),
            }));
        }
        if self.stride < self.width * N {
            return Err(PicScaleError::InvalidStride(self.width * N, self.stride));
        }
        Ok(())
    }
}

impl<'a, T, const N: usize> ImageStoreMut<'a, T, N>
where
    T: Clone + Copy + Debug + Default,
{
    /// Creates a new mutable storage from a vector
    ///
    /// Always sets the bit depth to `0`
    pub fn new(
        slice_ref: Vec<T>,
        width: usize,
        height: usize,
    ) -> Result<ImageStoreMut<'a, T, N>, PicScaleError> {
        let expected_size = width * height * N;
        if slice_ref.len() != width * height * N {
            return Err(PicScaleError::BufferMismatch(PicScaleBufferMismatch {
                expected: expected_size,
                width,
                height,
                channels: N,
                slice_len: slice_ref.len(),
            }));
        }
        Ok(ImageStoreMut::<T, N> {
            buffer: BufferStore::Owned(slice_ref),
            channels: N,
            width,
            height,
            stride: width * N,
            bit_depth: 0,
        })
    }

    /// Creates a new mutable storage from a mutable slice
    ///
    /// Always sets the bit depth to `0`
    pub fn borrow(
        slice_ref: &'a mut [T],
        width: usize,
        height: usize,
    ) -> Result<ImageStoreMut<'a, T, N>, PicScaleError> {
        let expected_size = width * height * N;
        if slice_ref.len() != width * height * N {
            return Err(PicScaleError::BufferMismatch(PicScaleBufferMismatch {
                expected: expected_size,
                width,
                height,
                channels: N,
                slice_len: slice_ref.len(),
            }));
        }
        Ok(ImageStoreMut::<T, N> {
            buffer: BufferStore::Borrowed(slice_ref),
            channels: N,
            width,
            height,
            stride: width * N,
            bit_depth: 0,
        })
    }

    /// Allocates new mutable image storage
    ///
    /// Always sets the bit depth to `0`
    pub fn alloc(width: usize, height: usize) -> ImageStoreMut<'a, T, N> {
        let vc = vec![T::default(); width * N * height];
        ImageStoreMut::<T, N> {
            buffer: BufferStore::Owned(vc),
            channels: N,
            width,
            height,
            stride: width * N,
            bit_depth: 0,
        }
    }

    /// Tries to allocate new mutable image storage
    ///
    /// Always sets the bit depth to `0`
    pub fn try_alloc(
        width: usize,
        height: usize,
    ) -> Result<ImageStoreMut<'a, T, N>, PicScaleError> {
        let vc = try_vec![T::default(); width * N * height];
        Ok(ImageStoreMut::<T, N> {
            buffer: BufferStore::Owned(vc),
            channels: N,
            width,
            height,
            stride: width * N,
            bit_depth: 0,
        })
    }

    /// Allocates new mutable image storage with the required bit depth
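    ///
    /// For instance, a destination for a 10-bit `u16` image could be allocated
    /// like this (illustrative sketch, assuming the crate is used as `pic_scale`):
    ///
    /// ```ignore
    /// use pic_scale::ImageStoreMut;
    ///
    /// // 10-bit RGBA16 target: samples live in u16, but only 10 bits are significant.
    /// let dst = ImageStoreMut::<u16, 4>::alloc_with_depth(640, 480, 10);
    /// assert_eq!(dst.bit_depth, 10);
    /// ```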
    pub fn alloc_with_depth(
        width: usize,
        height: usize,
        bit_depth: usize,
    ) -> ImageStoreMut<'a, T, N> {
        let vc = vec![T::default(); width * N * height];
        ImageStoreMut::<T, N> {
            buffer: BufferStore::Owned(vc),
            channels: N,
            width,
            height,
            stride: width * N,
            bit_depth,
        }
    }

    /// Tries to allocate new mutable image storage with the required bit depth
    pub fn try_alloc_with_depth(
        width: usize,
        height: usize,
        bit_depth: usize,
    ) -> Result<ImageStoreMut<'a, T, N>, PicScaleError> {
        let vc = try_vec![T::default(); width * N * height];
        Ok(ImageStoreMut::<T, N> {
            buffer: BufferStore::Owned(vc),
            channels: N,
            width,
            height,
            stride: width * N,
            bit_depth,
        })
    }
}

impl<T, const N: usize> ImageStoreMut<'_, T, N>
where
    T: Clone + Copy + Debug,
{
    /// Returns the effective stride
    ///
    /// If the stride is set to 0, this returns `width * N`
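    ///
    /// A small illustration of the fallback (sketch, assuming crate usage as `pic_scale`):
    ///
    /// ```ignore
    /// use pic_scale::ImageStoreMut;
    ///
    /// let mut store = ImageStoreMut::<u8, 3>::alloc(5, 5);
    /// assert_eq!(store.stride(), 5 * 3);
    /// // A zero stride is treated as "tightly packed".
    /// store.stride = 0;
    /// assert_eq!(store.stride(), 5 * 3);
    /// ```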
    #[inline]
    pub fn stride(&self) -> usize {
        if self.stride == 0 {
            return self.width * N;
        }
        self.stride
    }
}

impl<T, const N: usize> ImageStore<'_, T, N>
where
    T: Clone + Copy + Debug,
{
    /// Returns the effective stride
    ///
    /// If the stride is set to 0, this returns `width * N`
    #[inline]
    pub fn stride(&self) -> usize {
        if self.stride == 0 {
            return self.width * N;
        }
        self.stride
    }
}

impl<'a, T, const N: usize> ImageStore<'a, T, N>
where
    T: Clone + Copy + Debug,
{
    /// Returns the image size
    pub fn get_size(&self) -> ImageSize {
        ImageSize::new(self.width, self.height)
    }

    /// Returns the current image store as an immutable slice
    pub fn as_bytes(&self) -> &[T] {
        match &self.buffer {
            std::borrow::Cow::Borrowed(br) => br,
            std::borrow::Cow::Owned(v) => v.as_ref(),
        }
    }

    /// Borrows an immutable slice into a new image store
    pub fn from_slice(
        slice_ref: &'a [T],
        width: usize,
        height: usize,
    ) -> Result<ImageStore<'a, T, N>, PicScaleError> {
        let expected_size = width * height * N;
        if slice_ref.len() != width * height * N {
            return Err(PicScaleError::BufferMismatch(PicScaleBufferMismatch {
                expected: expected_size,
                width,
                height,
                channels: N,
                slice_len: slice_ref.len(),
            }));
        }
        Ok(ImageStore::<T, N> {
            buffer: std::borrow::Cow::Borrowed(slice_ref),
            channels: N,
            width,
            height,
            stride: width * N,
            bit_depth: 0,
        })
    }

    /// Deep copies this immutable image store into a new owned immutable store
    pub fn copied<'b>(&self) -> ImageStore<'b, T, N> {
        ImageStore::<T, N> {
            buffer: std::borrow::Cow::Owned(self.buffer.as_ref().to_vec()),
            channels: N,
            width: self.width,
            height: self.height,
            stride: self.width * N,
            bit_depth: self.bit_depth,
        }
    }

    /// Deep copies this immutable image into a mutable store
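    ///
    /// A small sketch of copying a source store into a freshly allocated mutable
    /// target (illustrative only, assuming crate usage as `pic_scale`):
    ///
    /// ```ignore
    /// use pic_scale::{ImageStore, ImageStoreMut};
    ///
    /// let src_pixels = vec![128u8; 8 * 8 * 4];
    /// let src = ImageStore::<u8, 4>::borrow(&src_pixels, 8, 8).unwrap();
    /// let mut dst = ImageStoreMut::<u8, 4>::alloc(8, 8);
    /// src.copied_to_mut(&mut dst);
    /// assert_eq!(dst.as_bytes()[0], 128);
    /// ```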
    pub fn copied_to_mut(&self, into: &mut ImageStoreMut<T, N>) {
        let into_stride = into.stride();
        for (src_row, dst_row) in self
            .buffer
            .as_ref()
            .chunks_exact(self.stride())
            .zip(into.buffer.borrow_mut().chunks_exact_mut(into_stride))
        {
            for (&src, dst) in src_row.iter().zip(dst_row.iter_mut()) {
                *dst = src;
            }
        }
    }
}

impl<'a, T, const N: usize> ImageStoreMut<'a, T, N>
where
    T: Clone + Copy + Debug,
{
    /// Returns the image size
    pub fn get_size(&self) -> ImageSize {
        ImageSize::new(self.width, self.height)
    }

    /// Returns the current image as an immutable slice
    pub fn as_bytes(&self) -> &[T] {
        match &self.buffer {
            BufferStore::Borrowed(p) => p,
            BufferStore::Owned(v) => v,
        }
    }

    /// Borrows a mutable slice as a new image store
    pub fn from_slice(
        slice_ref: &'a mut [T],
        width: usize,
        height: usize,
    ) -> Result<ImageStoreMut<'a, T, N>, PicScaleError> {
        let expected_size = width * height * N;
        if slice_ref.len() != width * height * N {
            return Err(PicScaleError::BufferMismatch(PicScaleBufferMismatch {
                expected: expected_size,
                width,
                height,
                channels: N,
                slice_len: slice_ref.len(),
            }));
        }
        Ok(ImageStoreMut::<T, N> {
            buffer: BufferStore::Borrowed(slice_ref),
            channels: N,
            width,
            height,
            stride: width * N,
            bit_depth: 0,
        })
    }

    /// Performs a deep copy into a new mutable image
    pub fn copied<'b>(&self) -> ImageStoreMut<'b, T, N> {
        ImageStoreMut::<T, N> {
            buffer: BufferStore::Owned(self.buffer.borrow().to_vec()),
            channels: N,
            width: self.width,
            height: self.height,
            stride: self.width * N,
            bit_depth: self.bit_depth,
        }
    }

    /// Performs a deep copy into a new immutable image
    pub fn to_immutable(&self) -> ImageStore<'_, T, N> {
        ImageStore::<T, N> {
            buffer: std::borrow::Cow::Owned(self.buffer.borrow().to_owned()),
            channels: N,
            width: self.width,
            height: self.height,
            stride: self.width * N,
            bit_depth: self.bit_depth,
        }
    }
}

pub(crate) trait AssociateAlpha<T: Clone + Copy + Debug, const N: usize> {
    fn premultiply_alpha(&self, into: &mut ImageStoreMut<'_, T, N>, pool: &novtb::ThreadPool);
    fn is_alpha_premultiplication_needed(&self) -> bool;
}

pub(crate) trait UnassociateAlpha<T: Clone + Copy + Debug, const N: usize> {
    fn unpremultiply_alpha(
        &mut self,
        pool: &novtb::ThreadPool,
        workload_strategy: WorkloadStrategy,
    );
}

impl AssociateAlpha<u8, 2> for ImageStore<'_, u8, 2> {
    fn premultiply_alpha(&self, into: &mut ImageStoreMut<'_, u8, 2>, pool: &novtb::ThreadPool) {
        let dst_stride = into.stride();
        let dst = into.buffer.borrow_mut();
        let src = self.buffer.as_ref();
        use crate::alpha_handle_u8::premultiply_alpha_gray_alpha;
        premultiply_alpha_gray_alpha(
            dst,
            dst_stride,
            src,
            self.width,
            self.height,
            self.stride(),
            pool,
        );
    }

    fn is_alpha_premultiplication_needed(&self) -> bool {
        use crate::alpha_check::has_non_constant_cap_alpha_gray_alpha8;
        has_non_constant_cap_alpha_gray_alpha8(self.buffer.as_ref(), self.width, self.stride())
    }
}

impl AssociateAlpha<u16, 2> for ImageStore<'_, u16, 2> {
    fn premultiply_alpha(&self, into: &mut ImageStoreMut<'_, u16, 2>, pool: &novtb::ThreadPool) {
        let dst_stride = into.stride();
        let dst = into.buffer.borrow_mut();
        let src = self.buffer.as_ref();
        use crate::alpha_handle_u16::premultiply_alpha_gray_alpha_u16;
        premultiply_alpha_gray_alpha_u16(
            dst,
            dst_stride,
            src,
            self.width,
            self.height,
            self.stride(),
            into.bit_depth,
            pool,
        );
    }

    fn is_alpha_premultiplication_needed(&self) -> bool {
        use crate::alpha_check::has_non_constant_cap_alpha_gray_alpha16;
        has_non_constant_cap_alpha_gray_alpha16(self.buffer.as_ref(), self.width, self.stride())
    }
}

impl AssociateAlpha<f32, 2> for ImageStore<'_, f32, 2> {
    fn premultiply_alpha(&self, into: &mut ImageStoreMut<'_, f32, 2>, pool: &novtb::ThreadPool) {
        let dst_stride = into.stride();
        let dst = into.buffer.borrow_mut();
        let src = self.buffer.as_ref();
        use crate::alpha_handle_f32::premultiply_alpha_gray_alpha_f32;
        premultiply_alpha_gray_alpha_f32(
            dst,
            dst_stride,
            src,
            self.stride(),
            self.width,
            self.height,
            pool,
        );
    }

    fn is_alpha_premultiplication_needed(&self) -> bool {
        use crate::alpha_check::has_non_constant_cap_alpha_gray_alpha_f32;
        has_non_constant_cap_alpha_gray_alpha_f32(self.buffer.as_ref(), self.width, self.stride())
    }
}

impl AssociateAlpha<u8, 4> for ImageStore<'_, u8, 4> {
    fn premultiply_alpha(&self, into: &mut ImageStoreMut<'_, u8, 4>, pool: &novtb::ThreadPool) {
        let dst_stride = into.stride();
        let dst = into.buffer.borrow_mut();
        let src = self.buffer.as_ref();
        premultiply_alpha_rgba(
            dst,
            dst_stride,
            src,
            self.width,
            self.height,
            self.stride(),
            pool,
        );
    }

    #[cfg(not(any(
        any(target_arch = "x86_64", target_arch = "x86"),
        all(target_arch = "aarch64", target_feature = "neon")
    )))]
    fn is_alpha_premultiplication_needed(&self) -> bool {
        use crate::alpha_check::has_non_constant_cap_alpha_rgba8;
        has_non_constant_cap_alpha_rgba8(self.buffer.as_ref(), self.width, self.stride())
    }

    #[cfg(all(target_arch = "aarch64", target_feature = "neon"))]
    fn is_alpha_premultiplication_needed(&self) -> bool {
        use crate::neon::neon_has_non_constant_cap_alpha_rgba8;
        neon_has_non_constant_cap_alpha_rgba8(self.buffer.as_ref(), self.width, self.stride())
    }

    #[cfg(any(target_arch = "x86_64", target_arch = "x86"))]
    fn is_alpha_premultiplication_needed(&self) -> bool {
        use crate::alpha_check::has_non_constant_cap_alpha_rgba8;
        #[cfg(feature = "sse")]
        use crate::sse::sse_has_non_constant_cap_alpha_rgba8;
        #[cfg(all(target_arch = "x86_64", feature = "nightly_avx512"))]
        if std::arch::is_x86_feature_detected!("avx512bw") {
            use crate::avx512::avx512_has_non_constant_cap_alpha_rgba8;
            return avx512_has_non_constant_cap_alpha_rgba8(
                self.buffer.as_ref(),
                self.width,
                self.stride(),
            );
        }
        #[cfg(all(target_arch = "x86_64", feature = "avx"))]
        if std::arch::is_x86_feature_detected!("avx2") {
            use crate::avx2::avx_has_non_constant_cap_alpha_rgba8;
            return avx_has_non_constant_cap_alpha_rgba8(
                self.buffer.as_ref(),
                self.width,
                self.stride(),
            );
        }
        #[cfg(feature = "sse")]
        if std::arch::is_x86_feature_detected!("sse4.1") {
            return sse_has_non_constant_cap_alpha_rgba8(
                self.buffer.as_ref(),
                self.width,
                self.stride(),
            );
        }
        has_non_constant_cap_alpha_rgba8(self.buffer.as_ref(), self.width, self.stride())
    }
}

impl UnassociateAlpha<u8, 4> for ImageStoreMut<'_, u8, 4> {
    fn unpremultiply_alpha(
        &mut self,
        pool: &novtb::ThreadPool,
        workload_strategy: WorkloadStrategy,
    ) {
        let src_stride = self.stride();
        let dst = self.buffer.borrow_mut();
        unpremultiply_alpha_rgba(
            dst,
            self.width,
            self.height,
            src_stride,
            pool,
            workload_strategy,
        );
    }
}

impl UnassociateAlpha<u8, 2> for ImageStoreMut<'_, u8, 2> {
    fn unpremultiply_alpha(
        &mut self,
        pool: &novtb::ThreadPool,
        workload_strategy: WorkloadStrategy,
    ) {
        let src_stride = self.stride();
        let dst = self.buffer.borrow_mut();
        use crate::alpha_handle_u8::unpremultiply_alpha_gray_alpha;
        unpremultiply_alpha_gray_alpha(
            dst,
            self.width,
            self.height,
            src_stride,
            pool,
            workload_strategy,
        );
    }
}

impl UnassociateAlpha<f32, 2> for ImageStoreMut<'_, f32, 2> {
    fn unpremultiply_alpha(&mut self, pool: &novtb::ThreadPool, _: WorkloadStrategy) {
        let src_stride = self.stride();
        let dst = self.buffer.borrow_mut();
        use crate::alpha_handle_f32::unpremultiply_alpha_gray_alpha_f32;
        unpremultiply_alpha_gray_alpha_f32(dst, src_stride, self.width, self.height, pool);
    }
}

impl UnassociateAlpha<u16, 2> for ImageStoreMut<'_, u16, 2> {
    fn unpremultiply_alpha(&mut self, pool: &novtb::ThreadPool, _: WorkloadStrategy) {
        let src_stride = self.stride();
        let dst = self.buffer.borrow_mut();
        use crate::alpha_handle_u16::unpremultiply_alpha_gray_alpha_u16;
        unpremultiply_alpha_gray_alpha_u16(
            dst,
            src_stride,
            self.width,
            self.height,
            self.bit_depth,
            pool,
        );
    }
}

impl AssociateAlpha<u16, 4> for ImageStore<'_, u16, 4> {
    fn premultiply_alpha(&self, into: &mut ImageStoreMut<'_, u16, 4>, pool: &novtb::ThreadPool) {
        let dst_stride = into.stride();
        let dst = into.buffer.borrow_mut();
        let src = self.buffer.as_ref();
        premultiply_alpha_rgba_u16(
            dst,
            dst_stride,
            src,
            self.width,
            self.height,
            self.stride(),
            into.bit_depth,
            pool,
        );
    }

    #[cfg(not(any(
        any(target_arch = "x86_64", target_arch = "x86"),
        all(target_arch = "aarch64", target_feature = "neon")
    )))]
    fn is_alpha_premultiplication_needed(&self) -> bool {
        use crate::alpha_check::has_non_constant_cap_alpha_rgba16;
        has_non_constant_cap_alpha_rgba16(self.buffer.as_ref(), self.width, self.stride())
    }

    #[cfg(all(target_arch = "aarch64", target_feature = "neon"))]
    fn is_alpha_premultiplication_needed(&self) -> bool {
        use crate::neon::neon_has_non_constant_cap_alpha_rgba16;
        neon_has_non_constant_cap_alpha_rgba16(self.buffer.as_ref(), self.width, self.stride())
    }

    #[cfg(any(target_arch = "x86_64", target_arch = "x86"))]
    fn is_alpha_premultiplication_needed(&self) -> bool {
        use crate::alpha_check::has_non_constant_cap_alpha_rgba16;
        #[cfg(feature = "sse")]
        use crate::sse::sse_has_non_constant_cap_alpha_rgba16;
        #[cfg(all(target_arch = "x86_64", feature = "avx"))]
        if std::arch::is_x86_feature_detected!("avx2") {
            use crate::avx2::avx_has_non_constant_cap_alpha_rgba16;
            return avx_has_non_constant_cap_alpha_rgba16(
                self.buffer.as_ref(),
                self.width,
                self.stride(),
            );
        }
        #[cfg(feature = "sse")]
        if std::arch::is_x86_feature_detected!("sse4.1") {
            return sse_has_non_constant_cap_alpha_rgba16(
                self.buffer.as_ref(),
                self.width,
                self.stride(),
            );
        }
        has_non_constant_cap_alpha_rgba16(self.buffer.as_ref(), self.width, self.stride())
    }
}

impl AssociateAlpha<f32, 4> for ImageStore<'_, f32, 4> {
    fn premultiply_alpha(&self, into: &mut ImageStoreMut<'_, f32, 4>, pool: &novtb::ThreadPool) {
        let src_stride = self.stride();
        let dst_stride = into.stride();
        let dst = into.buffer.borrow_mut();
        let src = self.buffer.as_ref();
        premultiply_alpha_rgba_f32(
            dst,
            dst_stride,
            src,
            src_stride,
            self.width,
            self.height,
            pool,
        );
    }

    fn is_alpha_premultiplication_needed(&self) -> bool {
        has_non_constant_cap_alpha_rgba_f32(self.buffer.as_ref(), self.width, self.stride())
    }
}

#[cfg(feature = "nightly_f16")]
impl AssociateAlpha<f16, 4> for ImageStore<'_, f16, 4> {
    fn premultiply_alpha(&self, into: &mut ImageStoreMut<'_, f16, 4>, pool: &novtb::ThreadPool) {
        let src_stride = self.stride();
        let dst_stride = into.stride();
        let dst = into.buffer.borrow_mut();
        let src = self.buffer.as_ref();
        premultiply_alpha_rgba_f16(
            dst,
            dst_stride,
            src,
            src_stride,
            self.width,
            self.height,
            pool,
        );
    }

    fn is_alpha_premultiplication_needed(&self) -> bool {
        true
    }
}

impl UnassociateAlpha<u16, 4> for ImageStoreMut<'_, u16, 4> {
    fn unpremultiply_alpha(&mut self, pool: &novtb::ThreadPool, _: WorkloadStrategy) {
        let src_stride = self.stride();
        let in_place = self.buffer.borrow_mut();
        unpremultiply_alpha_rgba_u16(
            in_place,
            src_stride,
            self.width,
            self.height,
            self.bit_depth,
            pool,
        );
    }
}

impl UnassociateAlpha<f32, 4> for ImageStoreMut<'_, f32, 4> {
    fn unpremultiply_alpha(&mut self, pool: &novtb::ThreadPool, _: WorkloadStrategy) {
        let stride = self.stride();
        let dst = self.buffer.borrow_mut();
        unpremultiply_alpha_rgba_f32(dst, stride, self.width, self.height, pool);
    }
}

#[cfg(feature = "nightly_f16")]
impl UnassociateAlpha<f16, 4> for ImageStoreMut<'_, f16, 4> {
    fn unpremultiply_alpha(&mut self, pool: &novtb::ThreadPool, _: WorkloadStrategy) {
        let stride = self.stride();
        let dst = self.buffer.borrow_mut();
        unpremultiply_alpha_rgba_f16(dst, stride, self.width, self.height, pool);
    }
}

pub type Planar8ImageStore<'a> = ImageStore<'a, u8, 1>;
pub type Planar8ImageStoreMut<'a> = ImageStoreMut<'a, u8, 1>;
pub type CbCr8ImageStore<'a> = ImageStore<'a, u8, 2>;
pub type CbCr8ImageStoreMut<'a> = ImageStoreMut<'a, u8, 2>;
pub type GrayAlpha8ImageStore<'a> = ImageStore<'a, u8, 2>;
pub type GrayAlpha8ImageStoreMut<'a> = ImageStoreMut<'a, u8, 2>;
pub type Rgba8ImageStore<'a> = ImageStore<'a, u8, 4>;
pub type Rgba8ImageStoreMut<'a> = ImageStoreMut<'a, u8, 4>;
pub type Rgb8ImageStore<'a> = ImageStore<'a, u8, 3>;
pub type Rgb8ImageStoreMut<'a> = ImageStoreMut<'a, u8, 3>;

pub type Planar16ImageStore<'a> = ImageStore<'a, u16, 1>;
pub type Planar16ImageStoreMut<'a> = ImageStoreMut<'a, u16, 1>;
pub type CbCr16ImageStore<'a> = ImageStore<'a, u16, 2>;
pub type CbCr16ImageStoreMut<'a> = ImageStoreMut<'a, u16, 2>;
pub type GrayAlpha16ImageStore<'a> = ImageStore<'a, u16, 2>;
pub type GrayAlpha16ImageStoreMut<'a> = ImageStoreMut<'a, u16, 2>;
pub type Rgba16ImageStore<'a> = ImageStore<'a, u16, 4>;
pub type Rgba16ImageStoreMut<'a> = ImageStoreMut<'a, u16, 4>;
pub type Rgb16ImageStore<'a> = ImageStore<'a, u16, 3>;
pub type Rgb16ImageStoreMut<'a> = ImageStoreMut<'a, u16, 3>;

#[cfg(feature = "nightly_f16")]
pub type PlanarF16ImageStore<'a> = ImageStore<'a, f16, 1>;
#[cfg(feature = "nightly_f16")]
pub type PlanarF16ImageStoreMut<'a> = ImageStoreMut<'a, f16, 1>;
#[cfg(feature = "nightly_f16")]
pub type CbCrF16ImageStore<'a> = ImageStore<'a, f16, 2>;
#[cfg(feature = "nightly_f16")]
pub type CbCrF16ImageStoreMut<'a> = ImageStoreMut<'a, f16, 2>;
#[cfg(feature = "nightly_f16")]
pub type RgbaF16ImageStore<'a> = ImageStore<'a, f16, 4>;
#[cfg(feature = "nightly_f16")]
pub type RgbaF16ImageStoreMut<'a> = ImageStoreMut<'a, f16, 4>;
#[cfg(feature = "nightly_f16")]
pub type RgbF16ImageStore<'a> = ImageStore<'a, f16, 3>;
#[cfg(feature = "nightly_f16")]
pub type RgbF16ImageStoreMut<'a> = ImageStoreMut<'a, f16, 3>;

pub type PlanarF32ImageStore<'a> = ImageStore<'a, f32, 1>;
pub type PlanarF32ImageStoreMut<'a> = ImageStoreMut<'a, f32, 1>;
pub type CbCrF32ImageStore<'a> = ImageStore<'a, f32, 2>;
pub type CbCrF32ImageStoreMut<'a> = ImageStoreMut<'a, f32, 2>;
pub type GrayAlphaF32ImageStore<'a> = ImageStore<'a, f32, 2>;
pub type GrayAlphaF32ImageStoreMut<'a> = ImageStoreMut<'a, f32, 2>;
pub type RgbaF32ImageStore<'a> = ImageStore<'a, f32, 4>;
pub type RgbaF32ImageStoreMut<'a> = ImageStoreMut<'a, f32, 4>;
pub type RgbF32ImageStore<'a> = ImageStore<'a, f32, 3>;
pub type RgbF32ImageStoreMut<'a> = ImageStoreMut<'a, f32, 3>;

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn image_store_alpha_test_rgba8() {
        let image_size = 256usize;
        let mut image = vec![0u8; image_size * image_size * 4];
        image[3 + 150 * 4] = 75;
        let store = ImageStore::<u8, 4>::from_slice(&image, image_size, image_size).unwrap();
        let has_alpha = store.is_alpha_premultiplication_needed();
        assert!(has_alpha);
    }

    #[test]
    fn check_alpha_not_exists_rgba8() {
        let image_size = 256usize;
        let image = vec![255u8; image_size * image_size * 4];
        let store = ImageStore::<u8, 4>::from_slice(&image, image_size, image_size).unwrap();
        let has_alpha = store.is_alpha_premultiplication_needed();
        assert!(!has_alpha);
    }
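
    // Illustrative addition (not part of the original test suite): checks the
    // documented stride() fallback, i.e. a zero stride is treated as `width * N`.
    #[test]
    fn stride_defaults_to_width_times_channels() {
        let mut store = ImageStoreMut::<u8, 4>::alloc(4, 4);
        assert_eq!(store.stride(), 4 * 4);
        store.stride = 0;
        assert_eq!(store.stride(), 4 * 4);
    }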
}