pic_scale/
image_store.rs

1/*
2 * Copyright (c) Radzivon Bartoshyk. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without modification,
5 * are permitted provided that the following conditions are met:
6 *
7 * 1.  Redistributions of source code must retain the above copyright notice, this
8 * list of conditions and the following disclaimer.
9 *
10 * 2.  Redistributions in binary form must reproduce the above copyright notice,
11 * this list of conditions and the following disclaimer in the documentation
12 * and/or other materials provided with the distribution.
13 *
14 * 3.  Neither the name of the copyright holder nor the names of its
15 * contributors may be used to endorse or promote products derived from
16 * this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
21 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
24 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
25 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
26 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */
29use crate::alpha_check::has_non_constant_cap_alpha_rgba_f32;
30#[cfg(feature = "nightly_f16")]
31use crate::alpha_handle_f16::{premultiply_alpha_rgba_f16, unpremultiply_alpha_rgba_f16};
32use crate::alpha_handle_f32::{premultiply_alpha_rgba_f32, unpremultiply_alpha_rgba_f32};
33use crate::alpha_handle_u8::{premultiply_alpha_rgba, unpremultiply_alpha_rgba};
34use crate::alpha_handle_u16::{premultiply_alpha_rgba_u16, unpremultiply_alpha_rgba_u16};
35use crate::pic_scale_error::{PicScaleBufferMismatch, PicScaleError};
36use crate::{ImageSize, WorkloadStrategy};
37#[cfg(feature = "nightly_f16")]
38use core::f16;
39use rayon::ThreadPool;
40use std::fmt::Debug;
41
/// Holds an image
///
/// # Arguments
/// `N` - count of channels
///
/// # Examples
/// ImageStore<u8, 4> - represents RGBA
/// ImageStore<u8, 3> - represents RGB
/// ImageStore<f32, 3> - represents RGB in f32 and etc
#[derive(Debug, Clone)]
pub struct ImageStore<'a, T, const N: usize>
where
    T: Clone + Copy + Debug,
{
    /// Pixel data; either borrows the caller's slice or owns a `Vec` (clone-on-write).
    pub buffer: std::borrow::Cow<'a, [T]>,
    /// Channels in the image
    pub channels: usize,
    /// Image width
    pub width: usize,
    /// Image height
    pub height: usize,
    /// Image stride; if stride is zero then it is considered to be `width * N`
    pub stride: usize,
    /// Bit-depth of the image; constructors in this file always set it to `0`.
    /// NOTE(review): appears meaningful only for `u16` stores (passed to the
    /// u16 alpha handlers below) — confirm against callers.
    pub bit_depth: usize,
}
68
/// Holds an image
///
/// # Arguments
/// `N` - count of channels
///
/// # Examples
/// ImageStore<u8, 4> - represents RGBA
/// ImageStore<u8, 3> - represents RGB
/// ImageStore<f32, 3> - represents RGB in f32 and etc
#[derive(Debug)]
pub struct ImageStoreMut<'a, T, const N: usize>
where
    T: Clone + Copy + Debug,
{
    /// Mutable pixel data; either borrows the caller's slice or owns a `Vec`.
    pub buffer: BufferStore<'a, T>,
    /// Channels in the image
    pub channels: usize,
    /// Image width
    pub width: usize,
    /// Image height
    pub height: usize,
    /// Image stride; if stride is zero then it is considered to be `width * N`
    pub stride: usize,
    /// Required for `u16` images
    pub bit_depth: usize,
}
95
/// Internal check: does this store's pixel type require a meaningful
/// `bit_depth`? (The impls below answer `true` only for `u16` stores.)
pub(crate) trait CheckStoreDensity {
    fn should_have_bit_depth(&self) -> bool;
}
99
/// Structure for mutable target buffer
#[derive(Debug)]
pub enum BufferStore<'a, T: Copy + Debug> {
    /// Mutably borrows caller-provided memory.
    Borrowed(&'a mut [T]),
    /// Owns its backing storage.
    Owned(Vec<T>),
}
106
107impl<T: Copy + Debug> BufferStore<'_, T> {
108    #[allow(clippy::should_implement_trait)]
109    /// Borrowing immutable slice
110    pub fn borrow(&self) -> &[T] {
111        match self {
112            Self::Borrowed(p_ref) => p_ref,
113            Self::Owned(vec) => vec,
114        }
115    }
116
117    #[allow(clippy::should_implement_trait)]
118    /// Borrowing mutable slice
119    pub fn borrow_mut(&mut self) -> &mut [T] {
120        match self {
121            Self::Borrowed(p_ref) => p_ref,
122            Self::Owned(vec) => vec,
123        }
124    }
125}
126
127impl<'a, T, const N: usize> ImageStore<'a, T, N>
128where
129    T: Clone + Copy + Debug + Default,
130{
131    /// Creates new store
132    pub fn new(
133        slice_ref: Vec<T>,
134        width: usize,
135        height: usize,
136    ) -> Result<ImageStore<'a, T, N>, PicScaleError> {
137        let expected_size = width * height * N;
138        if slice_ref.len() != width * height * N {
139            return Err(PicScaleError::BufferMismatch(PicScaleBufferMismatch {
140                expected: expected_size,
141                width,
142                height,
143                channels: N,
144                slice_len: slice_ref.len(),
145            }));
146        }
147        Ok(ImageStore::<T, N> {
148            buffer: std::borrow::Cow::Owned(slice_ref),
149            channels: N,
150            width,
151            height,
152            stride: width * N,
153            bit_depth: 0,
154        })
155    }
156
157    /// Borrows immutable slice as new image store
158    pub fn borrow(
159        slice_ref: &'a [T],
160        width: usize,
161        height: usize,
162    ) -> Result<ImageStore<'a, T, N>, PicScaleError> {
163        let expected_size = width * height * N;
164        if slice_ref.len() != width * height * N {
165            return Err(PicScaleError::BufferMismatch(PicScaleBufferMismatch {
166                expected: expected_size,
167                width,
168                height,
169                channels: N,
170                slice_len: slice_ref.len(),
171            }));
172        }
173        Ok(ImageStore::<T, N> {
174            buffer: std::borrow::Cow::Borrowed(slice_ref),
175            channels: N,
176            width,
177            height,
178            stride: width * N,
179            bit_depth: 0,
180        })
181    }
182
183    /// Allocates new owned image store
184    pub fn alloc(width: usize, height: usize) -> ImageStore<'a, T, N> {
185        let vc = vec![T::default(); width * N * height];
186        ImageStore::<T, N> {
187            buffer: std::borrow::Cow::Owned(vc),
188            channels: N,
189            width,
190            height,
191            stride: width * N,
192            bit_depth: 0,
193        }
194    }
195}
196
impl<const N: usize> CheckStoreDensity for ImageStoreMut<'_, u8, N> {
    /// `u8` stores carry no meaningful bit depth.
    fn should_have_bit_depth(&self) -> bool {
        false
    }
}
202
impl<const N: usize> CheckStoreDensity for ImageStoreMut<'_, f32, N> {
    /// `f32` stores carry no meaningful bit depth.
    fn should_have_bit_depth(&self) -> bool {
        false
    }
}
208
#[cfg(feature = "nightly_f16")]
impl<const N: usize> CheckStoreDensity for ImageStoreMut<'_, f16, N> {
    /// `f16` stores carry no meaningful bit depth.
    fn should_have_bit_depth(&self) -> bool {
        false
    }
}
215
impl<const N: usize> CheckStoreDensity for ImageStoreMut<'_, u16, N> {
    /// `u16` stores must declare their real bit depth (e.g. 10/12/16).
    fn should_have_bit_depth(&self) -> bool {
        true
    }
}
221
222impl<T, const N: usize> ImageStoreMut<'_, T, N>
223where
224    T: Clone + Copy + Debug + Default,
225{
226    pub(crate) fn validate(&self) -> Result<(), PicScaleError> {
227        let expected_size = self.stride() * self.height;
228        if self.buffer.borrow().len() != self.stride() * self.height {
229            return Err(PicScaleError::BufferMismatch(PicScaleBufferMismatch {
230                expected: expected_size,
231                width: self.width,
232                height: self.height,
233                channels: N,
234                slice_len: self.buffer.borrow().len(),
235            }));
236        }
237        if self.stride < self.width * N {
238            return Err(PicScaleError::InvalidStride(self.width * N, self.stride));
239        }
240        Ok(())
241    }
242}
243
244impl<T, const N: usize> ImageStore<'_, T, N>
245where
246    T: Clone + Copy + Debug + Default,
247{
248    pub(crate) fn validate(&self) -> Result<(), PicScaleError> {
249        let expected_size = self.stride() * self.height;
250        if self.buffer.as_ref().len() != self.stride() * self.height {
251            return Err(PicScaleError::BufferMismatch(PicScaleBufferMismatch {
252                expected: expected_size,
253                width: self.width,
254                height: self.height,
255                channels: N,
256                slice_len: self.buffer.as_ref().len(),
257            }));
258        }
259        if self.stride < self.width * N {
260            return Err(PicScaleError::InvalidStride(self.width * N, self.stride));
261        }
262        Ok(())
263    }
264}
265
266impl<'a, T, const N: usize> ImageStoreMut<'a, T, N>
267where
268    T: Clone + Copy + Debug + Default,
269{
270    /// Creates new mutable storage from vector
271    ///
272    /// Always sets bit-depth to `0`
273    pub fn new(
274        slice_ref: Vec<T>,
275        width: usize,
276        height: usize,
277    ) -> Result<ImageStoreMut<'a, T, N>, PicScaleError> {
278        let expected_size = width * height * N;
279        if slice_ref.len() != width * height * N {
280            return Err(PicScaleError::BufferMismatch(PicScaleBufferMismatch {
281                expected: expected_size,
282                width,
283                height,
284                channels: N,
285                slice_len: slice_ref.len(),
286            }));
287        }
288        Ok(ImageStoreMut::<T, N> {
289            buffer: BufferStore::Owned(slice_ref),
290            channels: N,
291            width,
292            height,
293            stride: width * N,
294            bit_depth: 0,
295        })
296    }
297
298    /// Creates new mutable storage from slice
299    ///
300    /// Always sets bit-depth to `0`
301    pub fn borrow(
302        slice_ref: &'a mut [T],
303        width: usize,
304        height: usize,
305    ) -> Result<ImageStoreMut<'a, T, N>, PicScaleError> {
306        let expected_size = width * height * N;
307        if slice_ref.len() != width * height * N {
308            return Err(PicScaleError::BufferMismatch(PicScaleBufferMismatch {
309                expected: expected_size,
310                width,
311                height,
312                channels: N,
313                slice_len: slice_ref.len(),
314            }));
315        }
316        Ok(ImageStoreMut::<T, N> {
317            buffer: BufferStore::Borrowed(slice_ref),
318            channels: N,
319            width,
320            height,
321            stride: width * N,
322            bit_depth: 0,
323        })
324    }
325
326    /// Allocates new mutable image storage
327    ///
328    /// Always sets bit depth to `0`
329    pub fn alloc(width: usize, height: usize) -> ImageStoreMut<'a, T, N> {
330        let vc = vec![T::default(); width * N * height];
331        ImageStoreMut::<T, N> {
332            buffer: BufferStore::Owned(vc),
333            channels: N,
334            width,
335            height,
336            stride: width * N,
337            bit_depth: 0,
338        }
339    }
340
341    /// Allocates new mutable image storage with required bit-depth
342    pub fn alloc_with_depth(
343        width: usize,
344        height: usize,
345        bit_depth: usize,
346    ) -> ImageStoreMut<'a, T, N> {
347        let vc = vec![T::default(); width * N * height];
348        ImageStoreMut::<T, N> {
349            buffer: BufferStore::Owned(vc),
350            channels: N,
351            width,
352            height,
353            stride: width * N,
354            bit_depth,
355        }
356    }
357}
358
359impl<T, const N: usize> ImageStoreMut<'_, T, N>
360where
361    T: Clone + Copy + Debug,
362{
363    /// Returns safe stride
364    ///
365    /// If stride set to 0 then returns `width * N`
366    #[inline]
367    pub fn stride(&self) -> usize {
368        if self.stride == 0 {
369            return self.width * N;
370        }
371        self.stride
372    }
373}
374
375impl<T, const N: usize> ImageStore<'_, T, N>
376where
377    T: Clone + Copy + Debug,
378{
379    /// Returns safe stride
380    ///
381    /// If stride set to 0 then returns `width * N`
382    #[inline]
383    pub fn stride(&self) -> usize {
384        if self.stride == 0 {
385            return self.width * N;
386        }
387        self.stride
388    }
389}
390
391impl<'a, T, const N: usize> ImageStore<'a, T, N>
392where
393    T: Clone + Copy + Debug,
394{
395    /// Returns bounded image size
396    pub fn get_size(&self) -> ImageSize {
397        ImageSize::new(self.width, self.height)
398    }
399
400    /// Returns current image store as immutable slice
401    pub fn as_bytes(&self) -> &[T] {
402        match &self.buffer {
403            std::borrow::Cow::Borrowed(br) => br,
404            std::borrow::Cow::Owned(v) => v.as_ref(),
405        }
406    }
407
408    /// Borrows immutable slice int oa new image store
409    pub fn from_slice(
410        slice_ref: &'a [T],
411        width: usize,
412        height: usize,
413    ) -> Result<ImageStore<'a, T, N>, PicScaleError> {
414        let expected_size = width * height * N;
415        if slice_ref.len() != width * height * N {
416            return Err(PicScaleError::BufferMismatch(PicScaleBufferMismatch {
417                expected: expected_size,
418                width,
419                height,
420                channels: N,
421                slice_len: slice_ref.len(),
422            }));
423        }
424        Ok(ImageStore::<T, N> {
425            buffer: std::borrow::Cow::Borrowed(slice_ref),
426            channels: N,
427            width,
428            height,
429            stride: width * N,
430            bit_depth: 0,
431        })
432    }
433
434    /// Deep copy immutable image store into a new immutable store
435    pub fn copied<'b>(&self) -> ImageStore<'b, T, N> {
436        ImageStore::<T, N> {
437            buffer: std::borrow::Cow::Owned(self.buffer.as_ref().to_vec()),
438            channels: N,
439            width: self.width,
440            height: self.height,
441            stride: self.width * N,
442            bit_depth: self.bit_depth,
443        }
444    }
445
446    /// Deep copy immutable image into mutable
447    pub fn copied_to_mut(&self, into: &mut ImageStoreMut<T, N>) {
448        let into_stride = into.stride();
449        for (src_row, dst_row) in self
450            .buffer
451            .as_ref()
452            .chunks_exact(self.stride())
453            .zip(into.buffer.borrow_mut().chunks_exact_mut(into_stride))
454        {
455            for (&src, dst) in src_row.iter().zip(dst_row.iter_mut()) {
456                *dst = src;
457            }
458        }
459    }
460}
461
462impl<'a, T, const N: usize> ImageStoreMut<'a, T, N>
463where
464    T: Clone + Copy + Debug,
465{
466    /// Returns bounded image size
467    pub fn get_size(&self) -> ImageSize {
468        ImageSize::new(self.width, self.height)
469    }
470
471    /// Returns current image as immutable slice
472    pub fn as_bytes(&self) -> &[T] {
473        match &self.buffer {
474            BufferStore::Borrowed(p) => p,
475            BufferStore::Owned(v) => v,
476        }
477    }
478
479    /// Borrows mutable slice as new image store
480    pub fn from_slice(
481        slice_ref: &'a mut [T],
482        width: usize,
483        height: usize,
484    ) -> Result<ImageStoreMut<'a, T, N>, PicScaleError> {
485        let expected_size = width * height * N;
486        if slice_ref.len() != width * height * N {
487            return Err(PicScaleError::BufferMismatch(PicScaleBufferMismatch {
488                expected: expected_size,
489                width,
490                height,
491                channels: N,
492                slice_len: slice_ref.len(),
493            }));
494        }
495        Ok(ImageStoreMut::<T, N> {
496            buffer: BufferStore::Borrowed(slice_ref),
497            channels: N,
498            width,
499            height,
500            stride: width * N,
501            bit_depth: 0,
502        })
503    }
504
505    /// Performs deep copy into a new mutable image
506    pub fn copied<'b>(&self) -> ImageStoreMut<'b, T, N> {
507        ImageStoreMut::<T, N> {
508            buffer: BufferStore::Owned(self.buffer.borrow().to_vec()),
509            channels: N,
510            width: self.width,
511            height: self.height,
512            stride: self.width * N,
513            bit_depth: self.bit_depth,
514        }
515    }
516
517    /// Performs deep copy into a new immutable image
518    pub fn to_immutable(&self) -> ImageStore<'_, T, N> {
519        ImageStore::<T, N> {
520            buffer: std::borrow::Cow::Owned(self.buffer.borrow().to_owned()),
521            channels: N,
522            width: self.width,
523            height: self.height,
524            stride: self.width * N,
525            bit_depth: self.bit_depth,
526        }
527    }
528}
529
/// Internal trait: forward alpha association (premultiplication) of a source
/// store into a mutable destination store.
pub(crate) trait AssociateAlpha<T: Clone + Copy + Debug, const N: usize> {
    /// Writes the premultiplied copy of `self` into `into`, optionally on `pool`.
    fn premultiply_alpha(&self, into: &mut ImageStoreMut<'_, T, N>, pool: &Option<ThreadPool>);
    /// Whether premultiplication would actually change any pixels
    /// (presumably: alpha is not uniformly at the maximum — see `alpha_check`).
    fn is_alpha_premultiplication_needed(&self) -> bool;
}
534
/// Internal trait: in-place reverse alpha association (unpremultiplication).
pub(crate) trait UnassociateAlpha<T: Clone + Copy + Debug, const N: usize> {
    /// Unpremultiplies the buffer in place, optionally on `pool`,
    /// honoring `workload_strategy` where the pixel type supports it.
    fn unpremultiply_alpha(
        &mut self,
        pool: &Option<ThreadPool>,
        workload_strategy: WorkloadStrategy,
    );
}
542
impl AssociateAlpha<u8, 2> for ImageStore<'_, u8, 2> {
    /// Premultiplies gray-alpha u8 pixels from `self` into `into`.
    fn premultiply_alpha(&self, into: &mut ImageStoreMut<'_, u8, 2>, pool: &Option<ThreadPool>) {
        // Capture the destination stride before mutably borrowing its buffer.
        let dst_stride = into.stride();
        let dst = into.buffer.borrow_mut();
        let src = self.buffer.as_ref();
        use crate::alpha_handle_u8::premultiply_alpha_gray_alpha;
        premultiply_alpha_gray_alpha(
            dst,
            dst_stride,
            src,
            self.width,
            self.height,
            self.stride(),
            pool,
        );
    }

    /// Delegates to the scalar gray-alpha constant-alpha check.
    fn is_alpha_premultiplication_needed(&self) -> bool {
        use crate::alpha_check::has_non_constant_cap_alpha_gray_alpha8;
        has_non_constant_cap_alpha_gray_alpha8(self.buffer.as_ref(), self.width, self.stride())
    }
}
565
impl AssociateAlpha<u16, 2> for ImageStore<'_, u16, 2> {
    /// Premultiplies gray-alpha u16 pixels from `self` into `into`.
    /// Uses the destination's `bit_depth` to scale the alpha math.
    fn premultiply_alpha(&self, into: &mut ImageStoreMut<'_, u16, 2>, pool: &Option<ThreadPool>) {
        // Capture the destination stride before mutably borrowing its buffer.
        let dst_stride = into.stride();
        let dst = into.buffer.borrow_mut();
        let src = self.buffer.as_ref();
        use crate::alpha_handle_u16::premultiply_alpha_gray_alpha_u16;
        premultiply_alpha_gray_alpha_u16(
            dst,
            dst_stride,
            src,
            self.width,
            self.height,
            self.stride(),
            into.bit_depth,
            pool,
        );
    }

    /// Delegates to the scalar gray-alpha constant-alpha check.
    fn is_alpha_premultiplication_needed(&self) -> bool {
        use crate::alpha_check::has_non_constant_cap_alpha_gray_alpha16;
        has_non_constant_cap_alpha_gray_alpha16(self.buffer.as_ref(), self.width, self.stride())
    }
}
589
impl AssociateAlpha<f32, 2> for ImageStore<'_, f32, 2> {
    /// Premultiplies gray-alpha f32 pixels from `self` into `into`.
    fn premultiply_alpha(&self, into: &mut ImageStoreMut<'_, f32, 2>, pool: &Option<ThreadPool>) {
        // Capture the destination stride before mutably borrowing its buffer.
        let dst_stride = into.stride();
        let dst = into.buffer.borrow_mut();
        let src = self.buffer.as_ref();
        use crate::alpha_handle_f32::premultiply_alpha_gray_alpha_f32;
        // NOTE(review): here the source stride precedes width/height, unlike the
        // u8/u16 variants above — assumed to match this callee's signature; confirm.
        premultiply_alpha_gray_alpha_f32(
            dst,
            dst_stride,
            src,
            self.stride(),
            self.width,
            self.height,
            pool,
        );
    }

    /// Delegates to the scalar gray-alpha constant-alpha check.
    fn is_alpha_premultiplication_needed(&self) -> bool {
        use crate::alpha_check::has_non_constant_cap_alpha_gray_alpha_f32;
        has_non_constant_cap_alpha_gray_alpha_f32(self.buffer.as_ref(), self.width, self.stride())
    }
}
612
impl AssociateAlpha<u8, 4> for ImageStore<'_, u8, 4> {
    /// Premultiplies RGBA8 pixels from `self` into `into`.
    fn premultiply_alpha(&self, into: &mut ImageStoreMut<'_, u8, 4>, pool: &Option<ThreadPool>) {
        // Capture the destination stride before mutably borrowing its buffer.
        let dst_stride = into.stride();
        let dst = into.buffer.borrow_mut();
        let src = self.buffer.as_ref();
        premultiply_alpha_rgba(
            dst,
            dst_stride,
            src,
            self.width,
            self.height,
            self.stride(),
            pool,
        );
    }

    // Scalar fallback: neither x86/x86_64 nor aarch64-with-neon.
    #[cfg(not(any(
        any(target_arch = "x86_64", target_arch = "x86"),
        all(target_arch = "aarch64", target_feature = "neon")
    )))]
    fn is_alpha_premultiplication_needed(&self) -> bool {
        use crate::alpha_check::has_non_constant_cap_alpha_rgba8;
        has_non_constant_cap_alpha_rgba8(self.buffer.as_ref(), self.width, self.stride())
    }

    // NEON-accelerated variant, selected at compile time on aarch64.
    #[cfg(all(target_arch = "aarch64", target_feature = "neon"))]
    fn is_alpha_premultiplication_needed(&self) -> bool {
        use crate::neon::neon_has_non_constant_cap_alpha_rgba8;
        neon_has_non_constant_cap_alpha_rgba8(self.buffer.as_ref(), self.width, self.stride())
    }

    // x86/x86_64: runtime dispatch to the widest available SIMD implementation,
    // in decreasing preference AVX-512 -> AVX2 -> SSE4.1, else the scalar check.
    #[cfg(any(target_arch = "x86_64", target_arch = "x86"))]
    fn is_alpha_premultiplication_needed(&self) -> bool {
        use crate::alpha_check::has_non_constant_cap_alpha_rgba8;
        #[cfg(feature = "sse")]
        use crate::sse::sse_has_non_constant_cap_alpha_rgba8;
        #[cfg(all(target_arch = "x86_64", feature = "nightly_avx512"))]
        if std::arch::is_x86_feature_detected!("avx512bw") {
            use crate::avx512::avx512_has_non_constant_cap_alpha_rgba8;
            return avx512_has_non_constant_cap_alpha_rgba8(
                self.buffer.as_ref(),
                self.width,
                self.stride(),
            );
        }
        #[cfg(all(target_arch = "x86_64", feature = "avx"))]
        if std::arch::is_x86_feature_detected!("avx2") {
            use crate::avx2::avx_has_non_constant_cap_alpha_rgba8;
            return avx_has_non_constant_cap_alpha_rgba8(
                self.buffer.as_ref(),
                self.width,
                self.stride(),
            );
        }
        #[cfg(feature = "sse")]
        if std::arch::is_x86_feature_detected!("sse4.1") {
            return sse_has_non_constant_cap_alpha_rgba8(
                self.buffer.as_ref(),
                self.width,
                self.stride(),
            );
        }
        has_non_constant_cap_alpha_rgba8(self.buffer.as_ref(), self.width, self.stride())
    }
}
678
impl UnassociateAlpha<u8, 4> for ImageStoreMut<'_, u8, 4> {
    /// Unpremultiplies RGBA8 pixels in place.
    fn unpremultiply_alpha(
        &mut self,
        pool: &Option<ThreadPool>,
        workload_strategy: WorkloadStrategy,
    ) {
        // Capture the stride before mutably borrowing the buffer.
        let src_stride = self.stride();
        let dst = self.buffer.borrow_mut();
        unpremultiply_alpha_rgba(
            dst,
            self.width,
            self.height,
            src_stride,
            pool,
            workload_strategy,
        );
    }
}
697
impl UnassociateAlpha<u8, 2> for ImageStoreMut<'_, u8, 2> {
    /// Unpremultiplies gray-alpha u8 pixels in place.
    fn unpremultiply_alpha(
        &mut self,
        pool: &Option<ThreadPool>,
        workload_strategy: WorkloadStrategy,
    ) {
        // Capture the stride before mutably borrowing the buffer.
        let src_stride = self.stride();
        let dst = self.buffer.borrow_mut();
        use crate::alpha_handle_u8::unpremultiply_alpha_gray_alpha;
        unpremultiply_alpha_gray_alpha(
            dst,
            self.width,
            self.height,
            src_stride,
            pool,
            workload_strategy,
        );
    }
}
717
impl UnassociateAlpha<f32, 2> for ImageStoreMut<'_, f32, 2> {
    /// Unpremultiplies gray-alpha f32 pixels in place.
    /// The workload strategy is ignored for this pixel type.
    fn unpremultiply_alpha(&mut self, pool: &Option<ThreadPool>, _: WorkloadStrategy) {
        // Capture the stride before mutably borrowing the buffer.
        let src_stride = self.stride();
        let dst = self.buffer.borrow_mut();
        use crate::alpha_handle_f32::unpremultiply_alpha_gray_alpha_f32;
        unpremultiply_alpha_gray_alpha_f32(dst, src_stride, self.width, self.height, pool);
    }
}
726
impl UnassociateAlpha<u16, 2> for ImageStoreMut<'_, u16, 2> {
    /// Unpremultiplies gray-alpha u16 pixels in place, scaled by `bit_depth`.
    /// The workload strategy is ignored for this pixel type.
    fn unpremultiply_alpha(&mut self, pool: &Option<ThreadPool>, _: WorkloadStrategy) {
        // Capture the stride before mutably borrowing the buffer.
        let src_stride = self.stride();
        let dst = self.buffer.borrow_mut();
        use crate::alpha_handle_u16::unpremultiply_alpha_gray_alpha_u16;
        unpremultiply_alpha_gray_alpha_u16(
            dst,
            src_stride,
            self.width,
            self.height,
            self.bit_depth,
            pool,
        );
    }
}
742
impl AssociateAlpha<u16, 4> for ImageStore<'_, u16, 4> {
    /// Premultiplies RGBA16 pixels from `self` into `into`.
    /// Uses the destination's `bit_depth` to scale the alpha math.
    fn premultiply_alpha(&self, into: &mut ImageStoreMut<'_, u16, 4>, pool: &Option<ThreadPool>) {
        // Capture the destination stride before mutably borrowing its buffer.
        let dst_stride = into.stride();
        let dst = into.buffer.borrow_mut();
        let src = self.buffer.as_ref();
        premultiply_alpha_rgba_u16(
            dst,
            dst_stride,
            src,
            self.width,
            self.height,
            self.stride(),
            into.bit_depth,
            pool,
        );
    }

    // Scalar fallback: neither x86/x86_64 nor aarch64-with-neon.
    #[cfg(not(any(
        any(target_arch = "x86_64", target_arch = "x86"),
        all(target_arch = "aarch64", target_feature = "neon")
    )))]
    fn is_alpha_premultiplication_needed(&self) -> bool {
        use crate::alpha_check::has_non_constant_cap_alpha_rgba16;
        has_non_constant_cap_alpha_rgba16(self.buffer.as_ref(), self.width, self.stride())
    }

    // NEON-accelerated variant, selected at compile time on aarch64.
    #[cfg(all(target_arch = "aarch64", target_feature = "neon"))]
    fn is_alpha_premultiplication_needed(&self) -> bool {
        use crate::neon::neon_has_non_constant_cap_alpha_rgba16;
        neon_has_non_constant_cap_alpha_rgba16(self.buffer.as_ref(), self.width, self.stride())
    }

    // x86/x86_64: runtime dispatch AVX2 -> SSE4.1, else the scalar check.
    #[cfg(any(target_arch = "x86_64", target_arch = "x86"))]
    fn is_alpha_premultiplication_needed(&self) -> bool {
        use crate::alpha_check::has_non_constant_cap_alpha_rgba16;
        #[cfg(feature = "sse")]
        use crate::sse::sse_has_non_constant_cap_alpha_rgba16;
        #[cfg(all(target_arch = "x86_64", feature = "avx"))]
        if std::arch::is_x86_feature_detected!("avx2") {
            use crate::avx2::avx_has_non_constant_cap_alpha_rgba16;
            return avx_has_non_constant_cap_alpha_rgba16(
                self.buffer.as_ref(),
                self.width,
                self.stride(),
            );
        }
        #[cfg(feature = "sse")]
        if std::arch::is_x86_feature_detected!("sse4.1") {
            return sse_has_non_constant_cap_alpha_rgba16(
                self.buffer.as_ref(),
                self.width,
                self.stride(),
            );
        }
        has_non_constant_cap_alpha_rgba16(self.buffer.as_ref(), self.width, self.stride())
    }
}
800
impl AssociateAlpha<f32, 4> for ImageStore<'_, f32, 4> {
    /// Premultiplies RGBA f32 pixels from `self` into `into`.
    fn premultiply_alpha(&self, into: &mut ImageStoreMut<'_, f32, 4>, pool: &Option<ThreadPool>) {
        // Capture both strides before mutably borrowing the destination buffer.
        let src_stride = self.stride();
        let dst_stride = into.stride();
        let dst = into.buffer.borrow_mut();
        let src = self.buffer.as_ref();
        premultiply_alpha_rgba_f32(
            dst,
            dst_stride,
            src,
            src_stride,
            self.width,
            self.height,
            pool,
        );
    }

    /// Delegates to the scalar RGBA f32 constant-alpha check.
    fn is_alpha_premultiplication_needed(&self) -> bool {
        has_non_constant_cap_alpha_rgba_f32(self.buffer.as_ref(), self.width, self.stride())
    }
}
822
#[cfg(feature = "nightly_f16")]
impl AssociateAlpha<f16, 4> for ImageStore<'_, f16, 4> {
    /// Premultiplies RGBA f16 pixels from `self` into `into`.
    fn premultiply_alpha(&self, into: &mut ImageStoreMut<'_, f16, 4>, pool: &Option<ThreadPool>) {
        // Capture both strides before mutably borrowing the destination buffer.
        let src_stride = self.stride();
        let dst_stride = into.stride();
        let dst = into.buffer.borrow_mut();
        let src = self.buffer.as_ref();
        premultiply_alpha_rgba_f16(
            dst,
            dst_stride,
            src,
            src_stride,
            self.width,
            self.height,
            pool,
        );
    }

    /// f16 stores have no constant-alpha fast path: premultiplication
    /// is always reported as needed.
    fn is_alpha_premultiplication_needed(&self) -> bool {
        true
    }
}
845
impl UnassociateAlpha<u16, 4> for ImageStoreMut<'_, u16, 4> {
    /// Unpremultiplies RGBA16 pixels in place, scaled by `bit_depth`.
    /// The workload strategy is ignored for this pixel type.
    fn unpremultiply_alpha(&mut self, pool: &Option<ThreadPool>, _: WorkloadStrategy) {
        // Capture the stride before mutably borrowing the buffer.
        let src_stride = self.stride();
        let in_place = self.buffer.borrow_mut();
        unpremultiply_alpha_rgba_u16(
            in_place,
            src_stride,
            self.width,
            self.height,
            self.bit_depth,
            pool,
        );
    }
}
860
impl UnassociateAlpha<f32, 4> for ImageStoreMut<'_, f32, 4> {
    /// Unpremultiplies RGBA f32 pixels in place.
    /// The workload strategy is ignored for this pixel type.
    fn unpremultiply_alpha(&mut self, pool: &Option<ThreadPool>, _: WorkloadStrategy) {
        // Capture the stride before mutably borrowing the buffer.
        let stride = self.stride();
        let dst = self.buffer.borrow_mut();
        unpremultiply_alpha_rgba_f32(dst, stride, self.width, self.height, pool);
    }
}
868
#[cfg(feature = "nightly_f16")]
impl UnassociateAlpha<f16, 4> for ImageStoreMut<'_, f16, 4> {
    /// Unpremultiplies RGBA f16 pixels in place.
    /// The workload strategy is ignored for this pixel type.
    fn unpremultiply_alpha(&mut self, pool: &Option<ThreadPool>, _: WorkloadStrategy) {
        // Capture the stride before mutably borrowing the buffer.
        let stride = self.stride();
        let dst = self.buffer.borrow_mut();
        unpremultiply_alpha_rgba_f16(dst, stride, self.width, self.height, pool);
    }
}
877
// --- 8-bit (`u8`) image store aliases ---
pub type Planar8ImageStore<'a> = ImageStore<'a, u8, 1>;
pub type Planar8ImageStoreMut<'a> = ImageStoreMut<'a, u8, 1>;
pub type CbCr8ImageStore<'a> = ImageStore<'a, u8, 2>;
pub type CbCr8ImageStoreMut<'a> = ImageStoreMut<'a, u8, 2>;
// Gray-alpha aliases share the two-channel layout with CbCr.
pub type GrayAlpha8ImageStore<'a> = ImageStore<'a, u8, 2>;
pub type GrayAlpha8ImageStoreMut<'a> = ImageStoreMut<'a, u8, 2>;
pub type Rgba8ImageStore<'a> = ImageStore<'a, u8, 4>;
pub type Rgba8ImageStoreMut<'a> = ImageStoreMut<'a, u8, 4>;
pub type Rgb8ImageStore<'a> = ImageStore<'a, u8, 3>;
pub type Rgb8ImageStoreMut<'a> = ImageStoreMut<'a, u8, 3>;

// --- 16-bit (`u16`) image store aliases ---
pub type Planar16ImageStore<'a> = ImageStore<'a, u16, 1>;
pub type Planar16ImageStoreMut<'a> = ImageStoreMut<'a, u16, 1>;
pub type CbCr16ImageStore<'a> = ImageStore<'a, u16, 2>;
pub type CbCr16ImageStoreMut<'a> = ImageStoreMut<'a, u16, 2>;
pub type GrayAlpha16ImageStore<'a> = ImageStore<'a, u16, 2>;
pub type GrayAlpha16ImageStoreMut<'a> = ImageStoreMut<'a, u16, 2>;
pub type Rgba16ImageStore<'a> = ImageStore<'a, u16, 4>;
pub type Rgba16ImageStoreMut<'a> = ImageStoreMut<'a, u16, 4>;
pub type Rgb16ImageStore<'a> = ImageStore<'a, u16, 3>;
pub type Rgb16ImageStoreMut<'a> = ImageStoreMut<'a, u16, 3>;

// --- `f16` image store aliases (nightly feature only) ---
#[cfg(feature = "nightly_f16")]
pub type PlanarF16ImageStore<'a> = ImageStore<'a, f16, 1>;
#[cfg(feature = "nightly_f16")]
pub type PlanarF16ImageStoreMut<'a> = ImageStoreMut<'a, f16, 1>;
#[cfg(feature = "nightly_f16")]
pub type CbCrF16ImageStore<'a> = ImageStore<'a, f16, 2>;
#[cfg(feature = "nightly_f16")]
pub type CbCrF16ImageStoreMut<'a> = ImageStoreMut<'a, f16, 2>;
#[cfg(feature = "nightly_f16")]
pub type RgbaF16ImageStore<'a> = ImageStore<'a, f16, 4>;
#[cfg(feature = "nightly_f16")]
pub type RgbaF16ImageStoreMut<'a> = ImageStoreMut<'a, f16, 4>;
#[cfg(feature = "nightly_f16")]
pub type RgbF16ImageStore<'a> = ImageStore<'a, f16, 3>;
#[cfg(feature = "nightly_f16")]
pub type RgbF16ImageStoreMut<'a> = ImageStoreMut<'a, f16, 3>;

// --- `f32` image store aliases ---
pub type PlanarF32ImageStore<'a> = ImageStore<'a, f32, 1>;
pub type PlanarF32ImageStoreMut<'a> = ImageStoreMut<'a, f32, 1>;
pub type CbCrF32ImageStore<'a> = ImageStore<'a, f32, 2>;
pub type CbCrF32ImageStoreMut<'a> = ImageStoreMut<'a, f32, 2>;
pub type GrayAlphaF32ImageStore<'a> = ImageStore<'a, f32, 2>;
pub type GrayAlphaF32ImageStoreMut<'a> = ImageStoreMut<'a, f32, 2>;
pub type RgbaF32ImageStore<'a> = ImageStore<'a, f32, 4>;
pub type RgbaF32ImageStoreMut<'a> = ImageStoreMut<'a, f32, 4>;
pub type RgbF32ImageStore<'a> = ImageStore<'a, f32, 3>;
pub type RgbF32ImageStoreMut<'a> = ImageStoreMut<'a, f32, 3>;
927
#[cfg(test)]
mod tests {
    use super::*;

    /// A single non-opaque alpha byte must make the store report that
    /// premultiplication is needed.
    #[test]
    fn image_store_alpha_test_rgba8() {
        let image_size = 256usize;
        let mut image = vec![0u8; image_size * image_size * 4];
        // Give pixel #150 a non-cap alpha value.
        image[3 + 150 * 4] = 75;
        let store = ImageStore::<u8, 4>::from_slice(&image, image_size, image_size).unwrap();
        // Idiomatic boolean assertion (clippy::bool_assert_comparison).
        assert!(store.is_alpha_premultiplication_needed());
    }

    /// A fully opaque image (alpha constantly 255) needs no premultiplication.
    #[test]
    fn check_alpha_not_exists_rgba8() {
        let image_size = 256usize;
        let image = vec![255u8; image_size * image_size * 4];
        let store = ImageStore::<u8, 4>::from_slice(&image, image_size, image_size).unwrap();
        assert!(!store.is_alpha_premultiplication_needed());
    }
}