use crate::alpha_check::has_non_constant_cap_alpha_rgba_f32;
#[cfg(feature = "nightly_f16")]
use crate::alpha_handle_f16::{premultiply_alpha_rgba_f16, unpremultiply_alpha_rgba_f16};
use crate::alpha_handle_f32::{premultiply_alpha_rgba_f32, unpremultiply_alpha_rgba_f32};
use crate::alpha_handle_u16::{premultiply_alpha_rgba_u16, unpremultiply_alpha_rgba_u16};
use crate::alpha_handle_u8::{premultiply_alpha_rgba, unpremultiply_alpha_rgba};
use crate::pic_scale_error::{PicScaleBufferMismatch, PicScaleError};
use crate::ImageSize;
#[cfg(feature = "nightly_f16")]
use core::f16;
use rayon::ThreadPool;
use std::fmt::Debug;

/// Read-only image store.
///
/// Pixel data is held in a [`std::borrow::Cow`], so it may be either borrowed
/// from the caller or owned by the store.
#[derive(Debug, Clone)]
pub struct ImageStore<'a, T, const N: usize>
where
    T: Clone + Copy + Debug,
{
    /// Pixel data.
    pub buffer: std::borrow::Cow<'a, [T]>,
    /// Channel count; always equals `N`.
    pub channels: usize,
    /// Image width in pixels.
    pub width: usize,
    /// Image height in pixels.
    pub height: usize,
    /// Row stride in elements of `T`; `0` means tightly packed (`width * N`).
    pub stride: usize,
    /// Bit depth in bits per channel; required for 16-bit stores.
    pub bit_depth: usize,
}

/// Mutable image store.
///
/// Pixel data is held in a [`BufferStore`], so it may be either mutably
/// borrowed from the caller or owned by the store.
#[derive(Debug)]
pub struct ImageStoreMut<'a, T, const N: usize>
where
    T: Clone + Copy + Debug,
{
    /// Pixel data.
    pub buffer: BufferStore<'a, T>,
    /// Channel count; always equals `N`.
    pub channels: usize,
    /// Image width in pixels.
    pub width: usize,
    /// Image height in pixels.
    pub height: usize,
    /// Row stride in elements of `T`; `0` means tightly packed (`width * N`).
    pub stride: usize,
    /// Bit depth in bits per channel; required for 16-bit stores.
    pub bit_depth: usize,
}

pub(crate) trait CheckStoreDensity {
    /// Whether this pixel type requires an explicit bit depth to be set.
    fn should_have_bit_depth(&self) -> bool;
}

/// Pixel buffer that is either mutably borrowed from the caller or owned.
#[derive(Debug)]
pub enum BufferStore<'a, T: Copy + Debug> {
    Borrowed(&'a mut [T]),
    Owned(Vec<T>),
}

impl<T: Copy + Debug> BufferStore<'_, T> {
    /// Returns an immutable view of the underlying data.
    #[allow(clippy::should_implement_trait)]
    pub fn borrow(&self) -> &[T] {
        match self {
            Self::Borrowed(p_ref) => p_ref,
            Self::Owned(vec) => vec,
        }
    }

    /// Returns a mutable view of the underlying data.
    #[allow(clippy::should_implement_trait)]
    pub fn borrow_mut(&mut self) -> &mut [T] {
        match self {
            Self::Borrowed(p_ref) => p_ref,
            Self::Owned(vec) => vec,
        }
    }
}

impl<'a, T, const N: usize> ImageStore<'a, T, N>
where
    T: Clone + Copy + Debug + Default,
{
    /// Creates an image store that takes ownership of `slice_ref`.
    ///
    /// Returns an error if the buffer length does not equal `width * height * N`.
    pub fn new(
        slice_ref: Vec<T>,
        width: usize,
        height: usize,
    ) -> Result<ImageStore<'a, T, N>, PicScaleError> {
        let expected_size = width * height * N;
        if slice_ref.len() != expected_size {
            return Err(PicScaleError::BufferMismatch(PicScaleBufferMismatch {
                expected: expected_size,
                width,
                height,
                channels: N,
                slice_len: slice_ref.len(),
            }));
        }
        Ok(ImageStore::<T, N> {
            buffer: std::borrow::Cow::Owned(slice_ref),
            channels: N,
            width,
            height,
            stride: width * N,
            bit_depth: 0,
        })
    }

    /// Creates an image store that borrows `slice_ref`.
    ///
    /// Returns an error if the buffer length does not equal `width * height * N`.
    pub fn borrow(
        slice_ref: &'a [T],
        width: usize,
        height: usize,
    ) -> Result<ImageStore<'a, T, N>, PicScaleError> {
        let expected_size = width * height * N;
        if slice_ref.len() != expected_size {
            return Err(PicScaleError::BufferMismatch(PicScaleBufferMismatch {
                expected: expected_size,
                width,
                height,
                channels: N,
                slice_len: slice_ref.len(),
            }));
        }
        Ok(ImageStore::<T, N> {
            buffer: std::borrow::Cow::Borrowed(slice_ref),
            channels: N,
            width,
            height,
            stride: width * N,
            bit_depth: 0,
        })
    }

    /// Allocates an owned, default-initialized image store.
    pub fn alloc(width: usize, height: usize) -> ImageStore<'a, T, N> {
        let vc = vec![T::default(); width * N * height];
        ImageStore::<T, N> {
            buffer: std::borrow::Cow::Owned(vc),
            channels: N,
            width,
            height,
            stride: width * N,
            bit_depth: 0,
        }
    }
}

impl<const N: usize> CheckStoreDensity for ImageStoreMut<'_, u8, N> {
    fn should_have_bit_depth(&self) -> bool {
        false
    }
}

impl<const N: usize> CheckStoreDensity for ImageStoreMut<'_, f32, N> {
    fn should_have_bit_depth(&self) -> bool {
        false
    }
}

#[cfg(feature = "nightly_f16")]
impl<const N: usize> CheckStoreDensity for ImageStoreMut<'_, f16, N> {
    fn should_have_bit_depth(&self) -> bool {
        false
    }
}

impl<const N: usize> CheckStoreDensity for ImageStoreMut<'_, u16, N> {
    fn should_have_bit_depth(&self) -> bool {
        true
    }
}

impl<T, const N: usize> ImageStoreMut<'_, T, N>
where
    T: Clone + Copy + Debug + Default,
{
    /// Checks that the buffer length matches `stride() * height` and that the
    /// stride is at least `width * N`.
    pub(crate) fn validate(&self) -> Result<(), PicScaleError> {
        let expected_size = self.stride() * self.height;
        if self.buffer.borrow().len() != expected_size {
            return Err(PicScaleError::BufferMismatch(PicScaleBufferMismatch {
                expected: expected_size,
                width: self.width,
                height: self.height,
                channels: N,
                slice_len: self.buffer.borrow().len(),
            }));
        }
        if self.stride < self.width * N {
            return Err(PicScaleError::InvalidStride(self.width * N, self.stride));
        }
        Ok(())
    }
}

impl<T, const N: usize> ImageStore<'_, T, N>
where
    T: Clone + Copy + Debug + Default,
{
    /// Checks that the buffer length matches `stride() * height` and that the
    /// stride is at least `width * N`.
    pub(crate) fn validate(&self) -> Result<(), PicScaleError> {
        let expected_size = self.stride() * self.height;
        if self.buffer.as_ref().len() != expected_size {
            return Err(PicScaleError::BufferMismatch(PicScaleBufferMismatch {
                expected: expected_size,
                width: self.width,
                height: self.height,
                channels: N,
                slice_len: self.buffer.as_ref().len(),
            }));
        }
        if self.stride < self.width * N {
            return Err(PicScaleError::InvalidStride(self.width * N, self.stride));
        }
        Ok(())
    }
}

impl<'a, T, const N: usize> ImageStoreMut<'a, T, N>
where
    T: Clone + Copy + Debug + Default,
{
    /// Creates a mutable image store that takes ownership of `slice_ref`.
    ///
    /// Returns an error if the buffer length does not equal `width * height * N`.
    pub fn new(
        slice_ref: Vec<T>,
        width: usize,
        height: usize,
    ) -> Result<ImageStoreMut<'a, T, N>, PicScaleError> {
        let expected_size = width * height * N;
        if slice_ref.len() != expected_size {
            return Err(PicScaleError::BufferMismatch(PicScaleBufferMismatch {
                expected: expected_size,
                width,
                height,
                channels: N,
                slice_len: slice_ref.len(),
            }));
        }
        Ok(ImageStoreMut::<T, N> {
            buffer: BufferStore::Owned(slice_ref),
            channels: N,
            width,
            height,
            stride: width * N,
            bit_depth: 0,
        })
    }

    /// Creates a mutable image store that borrows `slice_ref`.
    ///
    /// Returns an error if the buffer length does not equal `width * height * N`.
    pub fn borrow(
        slice_ref: &'a mut [T],
        width: usize,
        height: usize,
    ) -> Result<ImageStoreMut<'a, T, N>, PicScaleError> {
        let expected_size = width * height * N;
        if slice_ref.len() != expected_size {
            return Err(PicScaleError::BufferMismatch(PicScaleBufferMismatch {
                expected: expected_size,
                width,
                height,
                channels: N,
                slice_len: slice_ref.len(),
            }));
        }
        Ok(ImageStoreMut::<T, N> {
            buffer: BufferStore::Borrowed(slice_ref),
            channels: N,
            width,
            height,
            stride: width * N,
            bit_depth: 0,
        })
    }

    /// Allocates an owned, default-initialized mutable image store.
    pub fn alloc(width: usize, height: usize) -> ImageStoreMut<'a, T, N> {
        let vc = vec![T::default(); width * N * height];
        ImageStoreMut::<T, N> {
            buffer: BufferStore::Owned(vc),
            channels: N,
            width,
            height,
            stride: width * N,
            bit_depth: 0,
        }
    }

    /// Allocates an owned, default-initialized mutable image store with the
    /// given bit depth.
    pub fn alloc_with_depth(
        width: usize,
        height: usize,
        bit_depth: usize,
    ) -> ImageStoreMut<'a, T, N> {
        let vc = vec![T::default(); width * N * height];
        ImageStoreMut::<T, N> {
            buffer: BufferStore::Owned(vc),
            channels: N,
            width,
            height,
            stride: width * N,
            bit_depth,
        }
    }
}

impl<T, const N: usize> ImageStoreMut<'_, T, N>
where
    T: Clone + Copy + Debug,
{
    /// Returns the row stride in elements; falls back to `width * N` when the
    /// stored stride is `0`.
    #[inline]
    pub fn stride(&self) -> usize {
        if self.stride == 0 {
            return self.width * N;
        }
        self.stride
    }
}

impl<T, const N: usize> ImageStore<'_, T, N>
where
    T: Clone + Copy + Debug,
{
    /// Returns the row stride in elements; falls back to `width * N` when the
    /// stored stride is `0`.
    #[inline]
    pub fn stride(&self) -> usize {
        if self.stride == 0 {
            return self.width * N;
        }
        self.stride
    }
}

impl<'a, T, const N: usize> ImageStore<'a, T, N>
where
    T: Clone + Copy + Debug,
{
    /// Returns the image dimensions.
    pub fn get_size(&self) -> ImageSize {
        ImageSize::new(self.width, self.height)
    }

    /// Returns the pixel data as a slice.
    pub fn as_bytes(&self) -> &[T] {
        match &self.buffer {
            std::borrow::Cow::Borrowed(br) => br,
            std::borrow::Cow::Owned(v) => v.as_ref(),
        }
    }

    /// Creates a borrowed image store from a slice.
    ///
    /// Returns an error if the buffer length does not equal `width * height * N`.
    pub fn from_slice(
        slice_ref: &'a [T],
        width: usize,
        height: usize,
    ) -> Result<ImageStore<'a, T, N>, PicScaleError> {
        let expected_size = width * height * N;
        if slice_ref.len() != expected_size {
            return Err(PicScaleError::BufferMismatch(PicScaleBufferMismatch {
                expected: expected_size,
                width,
                height,
                channels: N,
                slice_len: slice_ref.len(),
            }));
        }
        Ok(ImageStore::<T, N> {
            buffer: std::borrow::Cow::Borrowed(slice_ref),
            channels: N,
            width,
            height,
            stride: width * N,
            bit_depth: 0,
        })
    }

    /// Returns a deep copy backed by an owned buffer.
    pub fn copied<'b>(&self) -> ImageStore<'b, T, N> {
        ImageStore::<T, N> {
            buffer: std::borrow::Cow::Owned(self.buffer.as_ref().to_vec()),
            channels: N,
            width: self.width,
            height: self.height,
            stride: self.width * N,
            bit_depth: self.bit_depth,
        }
    }

    /// Copies this store's rows into `into`, honoring both strides.
    pub fn copied_to_mut(&self, into: &mut ImageStoreMut<T, N>) {
        let into_stride = into.stride();
        for (src_row, dst_row) in self
            .buffer
            .as_ref()
            .chunks_exact(self.stride())
            .zip(into.buffer.borrow_mut().chunks_exact_mut(into_stride))
        {
            for (&src, dst) in src_row.iter().zip(dst_row.iter_mut()) {
                *dst = src;
            }
        }
    }
}

impl<'a, T, const N: usize> ImageStoreMut<'a, T, N>
where
    T: Clone + Copy + Debug,
{
    /// Returns the image dimensions.
    pub fn get_size(&self) -> ImageSize {
        ImageSize::new(self.width, self.height)
    }

    /// Returns the pixel data as a slice.
    pub fn as_bytes(&self) -> &[T] {
        match &self.buffer {
            BufferStore::Borrowed(p) => p,
            BufferStore::Owned(v) => v,
        }
    }

    /// Creates a mutable image store that borrows `slice_ref`.
    ///
    /// Returns an error if the buffer length does not equal `width * height * N`.
    pub fn from_slice(
        slice_ref: &'a mut [T],
        width: usize,
        height: usize,
    ) -> Result<ImageStoreMut<'a, T, N>, PicScaleError> {
        let expected_size = width * height * N;
        if slice_ref.len() != expected_size {
            return Err(PicScaleError::BufferMismatch(PicScaleBufferMismatch {
                expected: expected_size,
                width,
                height,
                channels: N,
                slice_len: slice_ref.len(),
            }));
        }
        Ok(ImageStoreMut::<T, N> {
            buffer: BufferStore::Borrowed(slice_ref),
            channels: N,
            width,
            height,
            stride: width * N,
            bit_depth: 0,
        })
    }

    /// Returns a deep copy backed by an owned buffer.
    pub fn copied<'b>(&self) -> ImageStoreMut<'b, T, N> {
        ImageStoreMut::<T, N> {
            buffer: BufferStore::Owned(self.buffer.borrow().to_vec()),
            channels: N,
            width: self.width,
            height: self.height,
            stride: self.width * N,
            bit_depth: self.bit_depth,
        }
    }

    /// Returns an immutable copy of this store.
    pub fn to_immutable(&self) -> ImageStore<'_, T, N> {
        ImageStore::<T, N> {
            buffer: std::borrow::Cow::Owned(self.buffer.borrow().to_owned()),
            channels: N,
            width: self.width,
            height: self.height,
            stride: self.width * N,
            bit_depth: self.bit_depth,
        }
    }
}

pub(crate) trait AssociateAlpha<T: Clone + Copy + Debug, const N: usize> {
    /// Premultiplies color channels by alpha, writing the result into `into`.
    fn premultiply_alpha(&self, into: &mut ImageStoreMut<'_, T, N>, pool: &Option<ThreadPool>);
    /// Returns `true` when the alpha channel is not constant at full opacity,
    /// i.e. premultiplication would actually change the image.
    fn is_alpha_premultiplication_needed(&self) -> bool;
}

/// Reverses alpha premultiplication in place.
pub(crate) trait UnassociateAlpha<T: Clone + Copy + Debug, const N: usize> {
    fn unpremultiply_alpha(&mut self, pool: &Option<ThreadPool>);
}

impl AssociateAlpha<u8, 4> for ImageStore<'_, u8, 4> {
    fn premultiply_alpha(&self, into: &mut ImageStoreMut<'_, u8, 4>, pool: &Option<ThreadPool>) {
        let dst_stride = into.stride();
        let dst = into.buffer.borrow_mut();
        let src = self.buffer.as_ref();
        premultiply_alpha_rgba(
            dst,
            dst_stride,
            src,
            self.width,
            self.height,
            self.stride(),
            pool,
        );
    }

    #[cfg(not(any(
        any(target_arch = "x86_64", target_arch = "x86"),
        all(target_arch = "aarch64", target_feature = "neon")
    )))]
    fn is_alpha_premultiplication_needed(&self) -> bool {
        use crate::alpha_check::has_non_constant_cap_alpha_rgba8;
        has_non_constant_cap_alpha_rgba8(self.buffer.as_ref(), self.width, self.stride())
    }

    #[cfg(all(target_arch = "aarch64", target_feature = "neon"))]
    fn is_alpha_premultiplication_needed(&self) -> bool {
        use crate::neon::neon_has_non_constant_cap_alpha_rgba8;
        neon_has_non_constant_cap_alpha_rgba8(self.buffer.as_ref(), self.width, self.stride())
    }

    #[cfg(any(target_arch = "x86_64", target_arch = "x86"))]
    fn is_alpha_premultiplication_needed(&self) -> bool {
        use crate::alpha_check::has_non_constant_cap_alpha_rgba8;
        #[cfg(feature = "sse")]
        use crate::sse::sse_has_non_constant_cap_alpha_rgba8;
        // Runtime dispatch: prefer the widest SIMD implementation available,
        // falling back to the scalar check.
        #[cfg(all(target_arch = "x86_64", feature = "nightly_avx512"))]
        if std::arch::is_x86_feature_detected!("avx512bw") {
            use crate::avx512::avx512_has_non_constant_cap_alpha_rgba8;
            return avx512_has_non_constant_cap_alpha_rgba8(
                self.buffer.as_ref(),
                self.width,
                self.stride(),
            );
        }
        #[cfg(all(target_arch = "x86_64", feature = "avx"))]
        if std::arch::is_x86_feature_detected!("avx2") {
            use crate::avx2::avx_has_non_constant_cap_alpha_rgba8;
            return avx_has_non_constant_cap_alpha_rgba8(
                self.buffer.as_ref(),
                self.width,
                self.stride(),
            );
        }
        #[cfg(feature = "sse")]
        if std::arch::is_x86_feature_detected!("sse4.1") {
            return sse_has_non_constant_cap_alpha_rgba8(
                self.buffer.as_ref(),
                self.width,
                self.stride(),
            );
        }
        has_non_constant_cap_alpha_rgba8(self.buffer.as_ref(), self.width, self.stride())
    }
}

impl UnassociateAlpha<u8, 4> for ImageStoreMut<'_, u8, 4> {
    fn unpremultiply_alpha(&mut self, pool: &Option<ThreadPool>) {
        let src_stride = self.stride();
        let dst = self.buffer.borrow_mut();
        unpremultiply_alpha_rgba(dst, self.width, self.height, src_stride, pool);
    }
}

impl AssociateAlpha<u16, 4> for ImageStore<'_, u16, 4> {
    fn premultiply_alpha(&self, into: &mut ImageStoreMut<'_, u16, 4>, pool: &Option<ThreadPool>) {
        let dst_stride = into.stride();
        let dst = into.buffer.borrow_mut();
        let src = self.buffer.as_ref();
        premultiply_alpha_rgba_u16(
            dst,
            dst_stride,
            src,
            self.width,
            self.height,
            self.stride(),
            into.bit_depth,
            pool,
        );
    }

    #[cfg(not(any(
        any(target_arch = "x86_64", target_arch = "x86"),
        all(target_arch = "aarch64", target_feature = "neon")
    )))]
    fn is_alpha_premultiplication_needed(&self) -> bool {
        use crate::alpha_check::has_non_constant_cap_alpha_rgba16;
        has_non_constant_cap_alpha_rgba16(self.buffer.as_ref(), self.width, self.stride())
    }

    #[cfg(all(target_arch = "aarch64", target_feature = "neon"))]
    fn is_alpha_premultiplication_needed(&self) -> bool {
        use crate::neon::neon_has_non_constant_cap_alpha_rgba16;
        neon_has_non_constant_cap_alpha_rgba16(self.buffer.as_ref(), self.width, self.stride())
    }

    #[cfg(any(target_arch = "x86_64", target_arch = "x86"))]
    fn is_alpha_premultiplication_needed(&self) -> bool {
        use crate::alpha_check::has_non_constant_cap_alpha_rgba16;
        #[cfg(feature = "sse")]
        use crate::sse::sse_has_non_constant_cap_alpha_rgba16;
        // Runtime dispatch: prefer the widest SIMD implementation available,
        // falling back to the scalar check.
        #[cfg(all(target_arch = "x86_64", feature = "avx"))]
        if std::arch::is_x86_feature_detected!("avx2") {
            use crate::avx2::avx_has_non_constant_cap_alpha_rgba16;
            return avx_has_non_constant_cap_alpha_rgba16(
                self.buffer.as_ref(),
                self.width,
                self.stride(),
            );
        }
        #[cfg(feature = "sse")]
        if std::arch::is_x86_feature_detected!("sse4.1") {
            return sse_has_non_constant_cap_alpha_rgba16(
                self.buffer.as_ref(),
                self.width,
                self.stride(),
            );
        }
        has_non_constant_cap_alpha_rgba16(self.buffer.as_ref(), self.width, self.stride())
    }
}

impl AssociateAlpha<f32, 4> for ImageStore<'_, f32, 4> {
    fn premultiply_alpha(&self, into: &mut ImageStoreMut<'_, f32, 4>, pool: &Option<ThreadPool>) {
        let src_stride = self.stride();
        let dst_stride = into.stride();
        let dst = into.buffer.borrow_mut();
        let src = self.buffer.as_ref();
        premultiply_alpha_rgba_f32(
            dst,
            dst_stride,
            src,
            src_stride,
            self.width,
            self.height,
            pool,
        );
    }

    fn is_alpha_premultiplication_needed(&self) -> bool {
        has_non_constant_cap_alpha_rgba_f32(self.buffer.as_ref(), self.width, self.stride())
    }
}

#[cfg(feature = "nightly_f16")]
impl AssociateAlpha<f16, 4> for ImageStore<'_, f16, 4> {
    fn premultiply_alpha(&self, into: &mut ImageStoreMut<'_, f16, 4>, pool: &Option<ThreadPool>) {
        let src_stride = self.stride();
        let dst_stride = into.stride();
        let dst = into.buffer.borrow_mut();
        let src = self.buffer.as_ref();
        premultiply_alpha_rgba_f16(
            dst,
            dst_stride,
            src,
            src_stride,
            self.width,
            self.height,
            pool,
        );
    }

    fn is_alpha_premultiplication_needed(&self) -> bool {
        // No constant-alpha shortcut is implemented for `f16`; premultiplication
        // is always requested.
        true
    }
}

impl UnassociateAlpha<u16, 4> for ImageStoreMut<'_, u16, 4> {
    fn unpremultiply_alpha(&mut self, pool: &Option<ThreadPool>) {
        let src_stride = self.stride();
        let in_place = self.buffer.borrow_mut();
        unpremultiply_alpha_rgba_u16(
            in_place,
            src_stride,
            self.width,
            self.height,
            self.bit_depth,
            pool,
        );
    }
}

impl UnassociateAlpha<f32, 4> for ImageStoreMut<'_, f32, 4> {
    fn unpremultiply_alpha(&mut self, pool: &Option<ThreadPool>) {
        let stride = self.stride();
        let dst = self.buffer.borrow_mut();
        unpremultiply_alpha_rgba_f32(dst, stride, self.width, self.height, pool);
    }
}

#[cfg(feature = "nightly_f16")]
impl UnassociateAlpha<f16, 4> for ImageStoreMut<'_, f16, 4> {
    fn unpremultiply_alpha(&mut self, pool: &Option<ThreadPool>) {
        let stride = self.stride();
        let dst = self.buffer.borrow_mut();
        unpremultiply_alpha_rgba_f16(dst, stride, self.width, self.height, pool);
    }
}

pub type Planar8ImageStore<'a> = ImageStore<'a, u8, 1>;
pub type Planar8ImageStoreMut<'a> = ImageStoreMut<'a, u8, 1>;
pub type CbCr8ImageStore<'a> = ImageStore<'a, u8, 2>;
pub type CbCr8ImageStoreMut<'a> = ImageStoreMut<'a, u8, 2>;
pub type Rgba8ImageStore<'a> = ImageStore<'a, u8, 4>;
pub type Rgba8ImageStoreMut<'a> = ImageStoreMut<'a, u8, 4>;
pub type Rgb8ImageStore<'a> = ImageStore<'a, u8, 3>;
pub type Rgb8ImageStoreMut<'a> = ImageStoreMut<'a, u8, 3>;

pub type Planar16ImageStore<'a> = ImageStore<'a, u16, 1>;
pub type Planar16ImageStoreMut<'a> = ImageStoreMut<'a, u16, 1>;
pub type CbCr16ImageStore<'a> = ImageStore<'a, u16, 2>;
pub type CbCr16ImageStoreMut<'a> = ImageStoreMut<'a, u16, 2>;
pub type Rgba16ImageStore<'a> = ImageStore<'a, u16, 4>;
pub type Rgba16ImageStoreMut<'a> = ImageStoreMut<'a, u16, 4>;
pub type Rgb16ImageStore<'a> = ImageStore<'a, u16, 3>;
pub type Rgb16ImageStoreMut<'a> = ImageStoreMut<'a, u16, 3>;

#[cfg(feature = "nightly_f16")]
pub type PlanarF16ImageStore<'a> = ImageStore<'a, f16, 1>;
#[cfg(feature = "nightly_f16")]
pub type PlanarF16ImageStoreMut<'a> = ImageStoreMut<'a, f16, 1>;
#[cfg(feature = "nightly_f16")]
pub type CbCrF16ImageStore<'a> = ImageStore<'a, f16, 2>;
#[cfg(feature = "nightly_f16")]
pub type CbCrF16ImageStoreMut<'a> = ImageStoreMut<'a, f16, 2>;
#[cfg(feature = "nightly_f16")]
pub type RgbaF16ImageStore<'a> = ImageStore<'a, f16, 4>;
#[cfg(feature = "nightly_f16")]
pub type RgbaF16ImageStoreMut<'a> = ImageStoreMut<'a, f16, 4>;
#[cfg(feature = "nightly_f16")]
pub type RgbF16ImageStore<'a> = ImageStore<'a, f16, 3>;
#[cfg(feature = "nightly_f16")]
pub type RgbF16ImageStoreMut<'a> = ImageStoreMut<'a, f16, 3>;

pub type PlanarF32ImageStore<'a> = ImageStore<'a, f32, 1>;
pub type PlanarF32ImageStoreMut<'a> = ImageStoreMut<'a, f32, 1>;
pub type CbCrF32ImageStore<'a> = ImageStore<'a, f32, 2>;
pub type CbCrF32ImageStoreMut<'a> = ImageStoreMut<'a, f32, 2>;
pub type RgbaF32ImageStore<'a> = ImageStore<'a, f32, 4>;
pub type RgbaF32ImageStoreMut<'a> = ImageStoreMut<'a, f32, 4>;
pub type RgbF32ImageStore<'a> = ImageStore<'a, f32, 3>;
pub type RgbF32ImageStoreMut<'a> = ImageStoreMut<'a, f32, 3>;

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn image_store_alpha_test_rgba8() {
        let image_size = 256usize;
        let mut image = vec![0u8; image_size * image_size * 4];
        image[3 + 150 * 4] = 75;
        let store = ImageStore::<u8, 4>::from_slice(&image, image_size, image_size).unwrap();
        let has_alpha = store.is_alpha_premultiplication_needed();
        assert!(has_alpha);
    }
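
    // Added sketch, not from the original suite: `from_slice` should reject a
    // deliberately undersized buffer with a `BufferMismatch` error rather than
    // panic. Uses only the constructors defined above.
    #[test]
    fn rejects_mismatched_buffer_size() {
        let image_size = 16usize;
        // One element short of a full 16x16 RGBA buffer.
        let image = vec![0u8; image_size * image_size * 4 - 1];
        let store = ImageStore::<u8, 4>::from_slice(&image, image_size, image_size);
        assert!(store.is_err());
    }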

    #[test]
    fn check_alpha_not_exists_rgba8() {
        let image_size = 256usize;
        let image = vec![255u8; image_size * image_size * 4];
        let store = ImageStore::<u8, 4>::from_slice(&image, image_size, image_size).unwrap();
        let has_alpha = store.is_alpha_premultiplication_needed();
        assert!(!has_alpha);
    }
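
    // Added sketches, not from the original suite, exercising only the API
    // defined in this file: an `alloc_with_depth` store should pass `validate`,
    // and `copied_to_mut` should preserve content when both stores use the
    // default packed stride.
    #[test]
    fn allocated_store_passes_validation() {
        let store = ImageStoreMut::<u16, 4>::alloc_with_depth(8, 8, 10);
        assert!(store.validate().is_ok());
        assert_eq!(store.bit_depth, 10);
        assert_eq!(store.stride(), 8 * 4);
    }

    #[test]
    fn copy_into_mutable_store_preserves_content() {
        // 8x8 RGBA buffer with varying values so a copy error would be visible.
        let src: Vec<u8> = (0..(8 * 8 * 4) as u32).map(|v| (v % 255) as u8).collect();
        let store = ImageStore::<u8, 4>::new(src.clone(), 8, 8).unwrap();
        let mut dst = ImageStoreMut::<u8, 4>::alloc(8, 8);
        store.copied_to_mut(&mut dst);
        assert_eq!(dst.as_bytes(), src.as_slice());
    }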
}