burn_core/nn/conv/conv_transpose2d.rs

use alloc::format;

use crate as burn;

use crate::config::Config;
use crate::module::Content;
use crate::module::DisplaySettings;
use crate::module::Module;
use crate::module::ModuleDisplay;
use crate::module::Param;
use crate::nn::Initializer;
use crate::nn::conv::checks;
use crate::tensor::Tensor;
use crate::tensor::backend::Backend;
use crate::tensor::module::conv_transpose2d;
use crate::tensor::ops::ConvTransposeOptions;

/// Configuration to create a [2D transposed convolution](ConvTranspose2d) layer
/// using the [init function](ConvTranspose2dConfig::init).
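///
/// # Example
///
/// ```ignore
/// // Minimal usage sketch; `MyBackend` and `device` stand in for a concrete
/// // backend type and its device.
/// let config = ConvTranspose2dConfig::new([3, 16], [3, 3])
///     .with_stride([2, 2])
///     .with_padding([1, 1]);
/// let conv = config.init::<MyBackend>(&device);
/// ```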
#[derive(Config, Debug)]
pub struct ConvTranspose2dConfig {
    /// The number of input and output channels, `[channels_in, channels_out]`.
    pub channels: [usize; 2],
    /// The size of the kernel.
    pub kernel_size: [usize; 2],
    /// The stride of the convolution.
    #[config(default = "[1, 1]")]
    pub stride: [usize; 2],
    /// Spacing between kernel elements.
    #[config(default = "[1, 1]")]
    pub dilation: [usize; 2],
    /// Controls the connections between input and output channels.
    #[config(default = "1")]
    pub groups: usize,
    /// The padding configuration.
    #[config(default = "[0, 0]")]
    pub padding: [usize; 2],
    /// The output padding configuration (extra size added to one side of each output dimension).
    #[config(default = "[0, 0]")]
    pub padding_out: [usize; 2],
    /// If bias should be added to the output.
    #[config(default = true)]
    pub bias: bool,
    /// The type of function used to initialize neural network parameters.
    #[config(
        default = "Initializer::KaimingUniform{gain:1.0/num_traits::Float::sqrt(3.0),fan_out_only:false}"
    )]
    pub initializer: Initializer,
}

/// Applies a 2D transposed convolution over input tensors.
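///
/// Should be created with [ConvTranspose2dConfig].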
#[derive(Module, Debug)]
#[module(custom_display)]
pub struct ConvTranspose2d<B: Backend> {
    /// Tensor of shape `[channels_in, channels_out / groups, kernel_size_1, kernel_size_2]`
    pub weight: Param<Tensor<B, 4>>,
    /// Tensor of shape `[channels_out]`
    pub bias: Option<Param<Tensor<B, 1>>>,
    /// Stride of the convolution.
    pub stride: [usize; 2],
    /// Size of the kernel.
    pub kernel_size: [usize; 2],
    /// Spacing between kernel elements.
    pub dilation: [usize; 2],
    /// Controls the connections between input and output channels.
    pub groups: usize,
    /// Padding configuration.
    pub padding: [usize; 2],
    /// Output padding configuration (extra size added to one side of each output dimension).
    pub padding_out: [usize; 2],
    /// Number of input and output channels, `[channels_in, channels_out]`.
    pub channels: [usize; 2],
}

impl<B: Backend> ModuleDisplay for ConvTranspose2d<B> {
    fn custom_settings(&self) -> Option<DisplaySettings> {
        DisplaySettings::new()
            .with_new_line_after_attribute(false)
            .optional()
    }

    fn custom_content(&self, content: Content) -> Option<Content> {
        content
            .add("channels", &format!("{:?}", &self.channels))
            .add("stride", &format!("{:?}", &self.stride))
            .add("kernel_size", &format!("{:?}", &self.kernel_size))
            .add("dilation", &format!("{:?}", &self.dilation))
            .add("groups", &self.groups)
            .add("padding", &format!("{:?}", &self.padding))
            .add("padding_out", &format!("{:?}", &self.padding_out))
            .optional()
    }
}

impl ConvTranspose2dConfig {
    /// Initialize a new [conv transpose 2d](ConvTranspose2d) module.
    pub fn init<B: Backend>(&self, device: &B::Device) -> ConvTranspose2d<B> {
        checks::checks_channels_div_groups(self.channels[0], self.channels[1], self.groups);

        let shape = [
            self.channels[0],
            self.channels[1] / self.groups,
            self.kernel_size[0],
            self.kernel_size[1],
        ];

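        // Fan-in used by the initializer: the second weight dimension
        // (channels_out / groups) multiplied by the kernel area.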
        let fan_in = self.channels[1] / self.groups * self.kernel_size.iter().product::<usize>();
        let weight = self
            .initializer
            .init_with(shape, Some(fan_in), None, device);
        let mut bias = None;

        if self.bias {
            bias = Some(
                self.initializer
                    .init_with([self.channels[1]], Some(fan_in), None, device),
            );
        }

        ConvTranspose2d {
            weight,
            bias,
            stride: self.stride,
            kernel_size: self.kernel_size,
            dilation: self.dilation,
            groups: self.groups,
            padding: self.padding,
            padding_out: self.padding_out,
            channels: self.channels,
        }
    }
}

impl<B: Backend> ConvTranspose2d<B> {
    /// Applies the forward pass on the input tensor.
    ///
    /// See also [conv_transpose2d](crate::tensor::module::conv_transpose2d).
    ///
    /// # Shapes
    ///
    /// - input: `[batch_size, channels_in, height_in, width_in]`
    /// - output: `[batch_size, channels_out, height_out, width_out]`
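    ///
    /// Following standard transposed-convolution arithmetic, each output spatial
    /// dimension is expected to be
    /// `(in - 1) * stride - 2 * padding + dilation * (kernel_size - 1) + padding_out + 1`.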
    pub fn forward(&self, input: Tensor<B, 4>) -> Tensor<B, 4> {
        conv_transpose2d(
            input,
            self.weight.val(),
            self.bias.as_ref().map(|bias| bias.val()),
            ConvTransposeOptions::new(
                self.stride,
                self.padding,
                self.padding_out,
                self.dilation,
                self.groups,
            ),
        )
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::TestBackend;
    use crate::tensor::TensorData;
    use burn_tensor::{Tolerance, ops::FloatElem};
    type FT = FloatElem<TestBackend>;

    #[test]
    fn initializer_default() {
        TestBackend::seed(0);

        let config = ConvTranspose2dConfig::new([5, 1], [5, 5]);
        let k = (config.channels[1] * config.kernel_size[0] * config.kernel_size[1]) as f64;
        let k = (config.groups as f64 / k).sqrt() as f32;
        let conv = config.init::<TestBackend>(&Default::default());

        conv.weight.to_data().assert_within_range(-k..k);
    }

    #[test]
    fn initializer_zeros() {
        TestBackend::seed(0);

        let config =
            ConvTranspose2dConfig::new([5, 2], [5, 5]).with_initializer(Initializer::Zeros);
        let conv = config.init::<TestBackend>(&Default::default());

        assert_eq!(config.initializer, Initializer::Zeros);
        conv.weight.to_data().assert_approx_eq::<FT>(
            &TensorData::zeros::<f32, _>(conv.weight.shape()),
            Tolerance::default(),
        );
    }

    #[test]
    fn display() {
        let config = ConvTranspose2dConfig::new([5, 2], [5, 5]);
        let conv = config.init::<TestBackend>(&Default::default());

        assert_eq!(
            format!("{}", conv),
            "ConvTranspose2d {channels: [5, 2], stride: [1, 1], kernel_size: [5, 5], dilation: [1, 1], groups: 1, padding: [0, 0], padding_out: [0, 0], params: 252}"
        );
    }

    #[test]
    #[should_panic = "Number of channels in input tensor and input channels of convolution must be equal. got: 4, expected: 5"]
    fn input_channels_mismatch() {
        let config = ConvTranspose2dConfig::new([5, 3], [3, 3]);
        let conv = config.init::<TestBackend>(&Default::default());

        let input = Tensor::<TestBackend, 4>::zeros([1, 4, 10, 10], &Default::default());
        let _ = conv.forward(input);
    }
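
    #[test]
    fn forward_output_shape() {
        // Shape sanity check, assuming the standard transposed-convolution output size:
        // (in - 1) * stride - 2 * padding + dilation * (kernel - 1) + padding_out + 1.
        let config = ConvTranspose2dConfig::new([2, 3], [3, 3]);
        let conv = config.init::<TestBackend>(&Default::default());

        let input = Tensor::<TestBackend, 4>::zeros([1, 2, 8, 8], &Default::default());
        let output = conv.forward(input);

        // (8 - 1) * 1 - 0 + 1 * (3 - 1) + 0 + 1 = 10
        assert_eq!(output.dims(), [1, 3, 10, 10]);
    }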
}