// image_blend/blend_ops.rs

1use std::{
2    iter::{zip, Zip},
3    ops::{Deref, DerefMut},
4    vec,
5};
6
7use image::{GenericImageView, ImageBuffer, Pixel};
8use num_traits::{Bounded, NumCast};
9
10use crate::{
11    enums::{ColorString, ColorStructure},
12    error::Error,
13};
14
15pub(crate) fn dims_match<T: GenericImageView, U: GenericImageView>(a: &mut T, b: &U) -> Result<(), Error> {
16    if (a.dimensions()) != b.dimensions() {
17        return Err(Error::DimensionMismatch);
18    }
19    Ok(())
20}
pub trait BufferBlend<P, Container>
where
    P: Pixel,
    Container: Deref<Target = [P::Subpixel]> + AsRef<[P::Subpixel]>,
{
    /// Blend `other` into `self` using the function `op`, where arg 0 is `self` and arg 1 is `other`.
    ///
    /// Handles type conversion and alpha channel detection and placement automatically.
    ///
    /// You may blend a luma image into an rgb image (in which case the luma image will be
    /// treated as a grayscale rgb image), but you cannot blend an rgb(a) image into a luma
    /// image.
    ///
    /// If `other` has an alpha channel, the output is weighted by this alpha channel (so if
    /// alpha for `other` for this pixel is 0.5, the blend effect will be 0.5 as strong).
    ///
    /// # Arguments
    ///
    /// Use `apply_to_color` and `apply_to_alpha` to control which channels are affected.
    ///
    /// If `apply_to_alpha` is true but `self` or `other` does not have an alpha channel, this
    /// option has no effect.
    ///
    /// `op` is a function that takes two `f64` values and returns an `f64` value
    /// (e.g. `|self, other| self + other`).
    ///
    /// Standard blend modes such as those found in photoshop are provided as functions
    /// (e.g. `pixel_add`, `pixel_mult`, etc.).
    ///
    /// The values are normalized to the range 0.0..1.0 before blending, and then scaled back
    /// to the input type's range.
    ///
    /// The output from `op` is automatically clamped to 0.0..1.0 before being converted back
    /// to the input type, so you don't need to worry about overflow/underflow.
    ///
    /// # Errors
    ///
    /// `DimensionMismatch`: `self` and `other` have different dimensions.
    ///
    /// `UnsupportedBlend`: `self` is a luma image and `other` is an rgb image.
    ///
    /// # Examples
    ///
    /// ## Example 1:
    ///
    /// Using the `pixel_mult` function to blend two images together:
    /// ```
    /// use image::open;
    /// use image_blend::BufferBlend;
    /// use image_blend::pixelops::pixel_mult;
    ///
    /// // Load an image
    /// let mut img1_dynamic = open("test_data/1.png").unwrap();
    /// let mut img1_buffer = img1_dynamic.as_mut_rgba8().unwrap();
    ///
    /// // Load another image
    /// let img2_dynamic = open("test_data/2.png").unwrap();
    /// let img2_buffer = img2_dynamic.to_rgba16();
    ///
    /// // Blend the images using the pixel_mult function
    /// img1_buffer.blend(&img2_buffer, pixel_mult, true, false).unwrap();
    /// img1_buffer.save("tests_out/doctest_buffer_blend_result.png").unwrap();
    /// ```
    ///
    /// ## Example 2:
    ///
    /// Using a custom function to blend two images together:
    ///
    /// ```
    /// use image::open;
    /// use image_blend::BufferBlend;
    ///
    /// let closest_to_gray = |a: f64, b: f64| {
    ///     let a_diff = (a - 0.5).abs();
    ///     let b_diff = (b - 0.5).abs();
    ///     if a_diff < b_diff {
    ///         a
    ///     } else {
    ///         b
    ///     }
    /// };
    ///
    /// // Load an image
    /// let mut img1_dynamic = open("test_data/1.png").unwrap();
    /// let mut img1_buffer = img1_dynamic.as_mut_rgba8().unwrap();
    ///
    /// // Load another image
    /// let img2_dynamic = open("test_data/2.png").unwrap();
    /// let img2_buffer = img2_dynamic.to_rgba16();
    ///
    /// // Blend the images using our custom function
    /// img1_buffer.blend(&img2_buffer, closest_to_gray, true, false).unwrap();
    /// img1_buffer.save("tests_out/doctest_buffer_custom_result.png").unwrap();
    /// ```
    fn blend(
        &mut self,
        other: &ImageBuffer<P, Container>,
        op: fn(f64, f64) -> f64,
        apply_to_color: bool,
        apply_to_alpha: bool,
    ) -> Result<(), Error>;
}
119impl<P, Pmut, Container, ContainerMut> BufferBlend<P, Container> for ImageBuffer<Pmut, ContainerMut>
120where
121    Pmut: Pixel,
122    P: Pixel,
123    Container: Deref<Target = [P::Subpixel]> + AsRef<[<P as Pixel>::Subpixel]>,
124    ContainerMut: DerefMut<Target = [Pmut::Subpixel]>
125        + DerefMut<Target = [Pmut::Subpixel]>
126        + AsMut<[<Pmut as Pixel>::Subpixel]>,
127{
128    fn blend(
129        &mut self,
130        other: &ImageBuffer<P, Container>,
131        op: fn(f64, f64) -> f64,
132        apply_to_color: bool,
133        apply_to_alpha: bool,
134    ) -> Result<(), Error> {
135        dims_match(self, other)?;
136        let structure_a: ColorStructure = self.sample_layout().try_into()?;
137        let structure_b: ColorStructure = other.sample_layout().try_into()?;
138
139        let (color_channels, alpha_channels) = get_channels(&structure_a, &structure_b)?;
140
141        let a_max = type_max::<Pmut>();
142        let b_max = type_max::<P>();
143
144        if apply_to_color {
145            zip(self.pixels_mut(), other.pixels()).for_each(|(px_a, px_b)| {
146                let channel_a = px_a.channels_mut();
147                let channel_b = px_b.channels();
148                let alpha_weight = match structure_b.alpha_channel() {
149                    Some(alpha_channel) => {
150                        <f64 as NumCast>::from(channel_b[alpha_channel]).unwrap() / b_max
151                    }
152                    None => 1.,
153                };
154                if alpha_weight == 0. {
155                    return;
156                };
157                color_channels.clone().for_each(|(ch_a, ch_b)| {
158                    let a_f64: f64 = <f64 as NumCast>::from(channel_a[ch_a]).unwrap() / a_max;
159                    let b_f64: f64 = <f64 as NumCast>::from(channel_b[ch_b]).unwrap() / b_max;
160                    let new_64_unweighted: f64 = NumCast::from(op(a_f64, b_f64)).unwrap();
161                    let new_64 = new_64_unweighted * alpha_weight + a_f64 * (1. - alpha_weight);
162                    let new_val = NumCast::from(new_64.clamp(0., 1.0) * a_max).unwrap();
163                    channel_a[ch_a] = new_val;
164                });
165            });
166        };
167        if apply_to_alpha {
168            if let Some((alpha_a, alpha_b)) = alpha_channels {
169                zip(self.pixels_mut(), other.pixels()).for_each(|(px_a, px_b)| {
170                    let channel_a = px_a.channels_mut();
171                    let channel_b = px_b.channels();
172
173                    let a_f64: f64 = <f64 as NumCast>::from(channel_a[alpha_a]).unwrap() / a_max;
174                    let b_f64: f64 = <f64 as NumCast>::from(channel_b[alpha_b]).unwrap() / b_max;
175                    let new_64: f64 = NumCast::from(op(a_f64, b_f64)).unwrap();
176                    let new_val = NumCast::from(new_64.clamp(0., 1.0) * a_max).unwrap();
177                    channel_a[alpha_a] = new_val;
178                });
179            }
180        }
181
182        Ok(())
183    }
184}
185
186pub(crate) fn type_max<P>() -> f64 where P: Pixel {
187    let max: f64 = NumCast::from(<P as Pixel>::Subpixel::max_value()).unwrap();
188    let f32_max: f64 = NumCast::from(<f32 as Bounded>::max_value()).unwrap();
189    // Hack to get around f32 images having a max value of 1.0 not f32::MAX
190    if max - f32_max == 0. {
191        return 1.
192    }
193    max
194}
195
196type ChannelIter = (
197    Zip<vec::IntoIter<usize>, vec::IntoIter<usize>>,
198    Option<(usize, usize)>,
199);
200fn get_channels(
201    structure_a: &ColorStructure,
202    structure_b: &ColorStructure,
203) -> Result<ChannelIter, Error> {
204    let color_channels = match (structure_a.rgb(), structure_b.rgb()) {
205        (true, true) => zip(vec![0usize, 1, 2], vec![0usize, 1, 2]),
206        (true, false) => zip(vec![0, 1, 2], vec![0, 0, 0]),
207        (false, false) => zip(vec![0], vec![0]),
208        (false, true) => Err(Error::UnsupportedBlend(
209            structure_a.color_str(),
210            structure_b.color_str(),
211        ))?,
212    };
213    let alpha_channels = match (structure_a.alpha(), structure_b.alpha()) {
214        (true, true) => Some((
215            structure_a.alpha_channel().unwrap(),
216            structure_b.alpha_channel().unwrap(),
217        )),
218        _ => None,
219    };
220    Ok((color_channels, alpha_channels))
221}