use alloc::format;

use crate as burn;

use crate::config::Config;
use crate::module::Content;
use crate::module::DisplaySettings;
use crate::module::Module;
use crate::module::ModuleDisplay;
use crate::module::Param;
use crate::nn::Initializer;
use crate::nn::conv::checks;
use crate::tensor::Tensor;
use crate::tensor::backend::Backend;
use crate::tensor::module::conv_transpose1d;
use crate::tensor::ops::ConvTransposeOptions;

/// Configuration to create a [1D transposed convolution](ConvTranspose1d) layer.
#[derive(Config, Debug)]
pub struct ConvTranspose1dConfig {
    /// The number of input and output channels, `[in, out]`.
    pub channels: [usize; 2],
    /// The size of the kernel.
    pub kernel_size: usize,
    /// The stride of the transposed convolution.
    #[config(default = "1")]
    pub stride: usize,
    /// The spacing between kernel elements.
    #[config(default = "1")]
    pub dilation: usize,
    /// The number of groups the channels are divided into.
    #[config(default = "1")]
    pub groups: usize,
    /// The padding of the transposed convolution.
    #[config(default = "0")]
    pub padding: usize,
    /// The additional padding applied to the output.
    #[config(default = "0")]
    pub padding_out: usize,
    /// If a bias should be added to the output.
    #[config(default = true)]
    pub bias: bool,
    /// The initializer used to initialize the weights and the bias.
    #[config(
        default = "Initializer::KaimingUniform{gain:1.0/num_traits::Float::sqrt(3.0),fan_out_only:false}"
    )]
    pub initializer: Initializer,
}

/// Applies a 1D transposed convolution over input tensors.
///
/// Should be created with [ConvTranspose1dConfig].
#[derive(Module, Debug)]
#[module(custom_display)]
pub struct ConvTranspose1d<B: Backend> {
    /// The learnable weights, of shape `[channels_in, channels_out / groups, kernel_size]`.
    pub weight: Param<Tensor<B, 3>>,
    /// The optional learnable bias, of shape `[channels_out]`.
    pub bias: Option<Param<Tensor<B, 1>>>,
    /// The stride of the transposed convolution.
    pub stride: usize,
    /// The size of the kernel.
    pub kernel_size: usize,
    /// The spacing between kernel elements.
    pub dilation: usize,
    /// The number of groups the channels are divided into.
    pub groups: usize,
    /// The padding of the transposed convolution.
    pub padding: usize,
    /// The additional padding applied to the output.
    pub padding_out: usize,
    /// The number of input and output channels, `[in, out]`.
    pub channels: [usize; 2],
}

impl<B: Backend> ModuleDisplay for ConvTranspose1d<B> {
    fn custom_settings(&self) -> Option<DisplaySettings> {
        DisplaySettings::new()
            .with_new_line_after_attribute(false)
            .optional()
    }

    fn custom_content(&self, content: Content) -> Option<Content> {
        content
            .add("channels", &format!("{:?}", &self.channels))
            .add("stride", &self.stride)
            .add("kernel_size", &self.kernel_size)
            .add("dilation", &self.dilation)
            .add("groups", &self.groups)
            .add("padding", &self.padding)
            .add("padding_out", &self.padding_out)
            .optional()
    }
}

impl ConvTranspose1dConfig {
    /// Initializes a new [ConvTranspose1d] module.
    pub fn init<B: Backend>(&self, device: &B::Device) -> ConvTranspose1d<B> {
        checks::checks_channels_div_groups(self.channels[0], self.channels[1], self.groups);

        let shape = [
            self.channels[0],
            self.channels[1] / self.groups,
            self.kernel_size,
        ];

        let fan_in = self.channels[1] / self.groups * self.kernel_size;
        let weight = self
            .initializer
            .init_with(shape, Some(fan_in), None, device);
        let mut bias = None;

        if self.bias {
            bias = Some(
                self.initializer
                    .init_with([self.channels[1]], Some(fan_in), None, device),
            );
        }

        ConvTranspose1d {
            weight,
            bias,
            stride: self.stride,
            kernel_size: self.kernel_size,
            dilation: self.dilation,
            groups: self.groups,
            padding: self.padding,
            padding_out: self.padding_out,
            channels: self.channels,
        }
    }
}

impl<B: Backend> ConvTranspose1d<B> {
    /// Applies the forward pass on the input tensor.
    ///
    /// # Shapes
    ///
    /// - input: `[batch_size, channels_in, length_in]`
    /// - output: `[batch_size, channels_out, length_out]`
    pub fn forward(&self, input: Tensor<B, 3>) -> Tensor<B, 3> {
        conv_transpose1d(
            input,
            self.weight.val(),
            self.bias.as_ref().map(|bias| bias.val()),
            ConvTransposeOptions::new(
                [self.stride],
                [self.padding],
                [self.padding_out],
                [self.dilation],
                self.groups,
            ),
        )
    }
}

#[cfg(test)]
mod tests {
    use burn_tensor::Tolerance;

    use super::*;
    use crate::TestBackend;
    use crate::tensor::TensorData;

    #[test]
    fn initializer_default() {
        TestBackend::seed(0);

        let config = ConvTranspose1dConfig::new([5, 1], 5);
        let k = (config.channels[1] * config.kernel_size) as f64;
        let k = (config.groups as f64 / k).sqrt() as f32;
        let conv = config.init::<TestBackend>(&Default::default());

        conv.weight.to_data().assert_within_range(-k..k);
    }
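
    // Added illustrative check, not part of the original file: `ConvTranspose1dConfig::new` only
    // takes the required fields, so the remaining fields should carry the `#[config(default = ...)]`
    // values declared above. This is a sketch assuming the `Config` derive fills in those defaults.
    #[test]
    fn config_defaults() {
        let config = ConvTranspose1dConfig::new([5, 1], 5);

        assert_eq!(config.stride, 1);
        assert_eq!(config.dilation, 1);
        assert_eq!(config.groups, 1);
        assert_eq!(config.padding, 0);
        assert_eq!(config.padding_out, 0);
        assert!(config.bias);
    }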

    #[test]
    fn initializer_zeros() {
        TestBackend::seed(0);

        let config = ConvTranspose1dConfig::new([5, 2], 5).with_initializer(Initializer::Zeros);
        let conv = config.init::<TestBackend>(&Default::default());

        assert_eq!(config.initializer, Initializer::Zeros);
        conv.weight.to_data().assert_approx_eq::<f32>(
            &TensorData::zeros::<f32, _>(conv.weight.shape()),
            Tolerance::default(),
        );
    }

    #[test]
    fn display() {
        let config = ConvTranspose1dConfig::new([5, 2], 5);
        let conv = config.init::<TestBackend>(&Default::default());

        assert_eq!(
            format!("{}", conv),
            "ConvTranspose1d {channels: [5, 2], stride: 1, kernel_size: 5, dilation: 1, groups: 1, padding: 0, padding_out: 0, params: 52}"
        );
    }
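
    // Added illustrative check, not part of the original file: a sketch of the expected output
    // length, assuming the usual transposed-convolution relationship
    // `length_out = (length_in - 1) * stride - 2 * padding + dilation * (kernel_size - 1) + padding_out + 1`.
    #[test]
    fn forward_output_shape() {
        let config = ConvTranspose1dConfig::new([5, 2], 3);
        let conv = config.init::<TestBackend>(&Default::default());

        // With the defaults (stride 1, padding 0, dilation 1, padding_out 0), a length of 10
        // expands to (10 - 1) * 1 - 0 + 1 * (3 - 1) + 0 + 1 = 12.
        let input = Tensor::<TestBackend, 3>::zeros([1, 5, 10], &Default::default());
        let output = conv.forward(input);

        assert_eq!(output.dims(), [1, 2, 12]);
    }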

    #[test]
    #[should_panic = "Number of channels in input tensor and input channels of convolution must be equal. got: 4, expected: 5"]
    fn input_channels_mismatch() {
        let config = ConvTranspose1dConfig::new([5, 3], 3);
        let conv = config.init::<TestBackend>(&Default::default());

        let input = Tensor::<TestBackend, 3>::zeros([1, 4, 10], &Default::default());
        let _ = conv.forward(input);
    }
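
    // Added illustrative check, not part of the original file: with `groups` set, `init` builds
    // the weight with shape `[channels_in, channels_out / groups, kernel_size]`, as computed above.
    #[test]
    fn grouped_weight_shape() {
        let mut config = ConvTranspose1dConfig::new([4, 4], 3);
        config.groups = 2;
        let conv = config.init::<TestBackend>(&Default::default());

        assert_eq!(conv.weight.val().dims(), [4, 2, 3]);
    }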
}