1use alloc::format;
2
3use burn_core as burn;
4
5use crate::conv::checks;
6use burn::config::Config;
7use burn::module::Content;
8use burn::module::DisplaySettings;
9use burn::module::Initializer;
10use burn::module::Module;
11use burn::module::ModuleDisplay;
12use burn::module::Param;
13use burn::tensor::Tensor;
14use burn::tensor::backend::Backend;
15use burn::tensor::module::conv_transpose1d;
16use burn::tensor::ops::ConvTransposeOptions;
17
/// Configuration to create a 1D transposed convolution layer, using the
/// [init function](ConvTranspose1dConfig::init).
#[derive(Config, Debug)]
pub struct ConvTranspose1dConfig {
    // The number of input and output channels: `[channels_in, channels_out]`
    // (dim 0 is the input side — see the weight shape built in `init`).
    pub channels: [usize; 2],
    // The size of the convolution kernel.
    pub kernel_size: usize,
    // The stride of the transposed convolution.
    #[config(default = "1")]
    pub stride: usize,
    // Spacing between kernel elements.
    #[config(default = "1")]
    pub dilation: usize,
    // Number of blocked connections between input and output channels;
    // `channels_in` and `channels_out` must both be divisible by this
    // (checked in `init` via `checks_channels_div_groups`).
    #[config(default = "1")]
    pub groups: usize,
    // Zero-padding added to both sides of the input.
    #[config(default = "0")]
    pub padding: usize,
    // Additional size added to one side of the output shape.
    #[config(default = "0")]
    pub padding_out: usize,
    // If `true`, a learnable bias is added to the output.
    #[config(default = true)]
    pub bias: bool,
    // Parameter initializer. The default is Kaiming-uniform with
    // gain 1/sqrt(3), i.e. uniform in ±sqrt(1 / fan_in) where
    // fan_in = channels_out / groups * kernel_size (see `init`).
    #[config(
        default = "Initializer::KaimingUniform{gain:1.0/num_traits::Float::sqrt(3.0),fan_out_only:false}"
    )]
    pub initializer: Initializer,
}
50
/// Applies a 1D transposed convolution over input tensors.
///
/// Should be created with [ConvTranspose1dConfig].
#[derive(Module, Debug)]
#[module(custom_display)]
pub struct ConvTranspose1d<B: Backend> {
    // Learnable weights of shape `[channels_in, channels_out / groups, kernel_size]`
    // (shape set in `ConvTranspose1dConfig::init`).
    pub weight: Param<Tensor<B, 3>>,
    // Optional learnable bias of shape `[channels_out]`.
    pub bias: Option<Param<Tensor<B, 1>>>,
    // Stride of the transposed convolution.
    pub stride: usize,
    // Size of the convolution kernel.
    pub kernel_size: usize,
    // Spacing between kernel elements.
    pub dilation: usize,
    // Number of blocked connections between input and output channels.
    pub groups: usize,
    // Zero-padding added to both sides of the input.
    pub padding: usize,
    // Additional size added to one side of the output shape.
    pub padding_out: usize,
    // `[channels_in, channels_out]`, kept for display and shape checks.
    pub channels: [usize; 2],
}
74
75impl<B: Backend> ModuleDisplay for ConvTranspose1d<B> {
76 fn custom_settings(&self) -> Option<DisplaySettings> {
77 DisplaySettings::new()
78 .with_new_line_after_attribute(false)
79 .optional()
80 }
81
82 fn custom_content(&self, content: Content) -> Option<Content> {
83 content
84 .add("channels", &format!("{:?}", &self.channels))
85 .add("stride", &self.stride)
86 .add("kernel_size", &self.kernel_size)
87 .add("dilation", &self.dilation)
88 .add("groups", &self.groups)
89 .add("padding", &self.padding)
90 .add("padding_out", &self.padding_out)
91 .optional()
92 }
93}
94
95impl ConvTranspose1dConfig {
96 pub fn init<B: Backend>(&self, device: &B::Device) -> ConvTranspose1d<B> {
98 checks::checks_channels_div_groups(self.channels[0], self.channels[1], self.groups);
99
100 let shape = [
101 self.channels[0],
102 self.channels[1] / self.groups,
103 self.kernel_size,
104 ];
105
106 let fan_in = self.channels[1] / self.groups * self.kernel_size;
107 let weight = self
108 .initializer
109 .init_with(shape, Some(fan_in), None, device);
110 let mut bias = None;
111
112 if self.bias {
113 bias = Some(
114 self.initializer
115 .init_with([self.channels[1]], Some(fan_in), None, device),
116 );
117 }
118
119 ConvTranspose1d {
120 weight,
121 bias,
122 stride: self.stride,
123 kernel_size: self.kernel_size,
124 dilation: self.dilation,
125 groups: self.groups,
126 padding: self.padding,
127 padding_out: self.padding_out,
128 channels: self.channels,
129 }
130 }
131}
132
133impl<B: Backend> ConvTranspose1d<B> {
134 pub fn forward(&self, input: Tensor<B, 3>) -> Tensor<B, 3> {
143 conv_transpose1d(
144 input,
145 self.weight.val(),
146 self.bias.as_ref().map(|bias| bias.val()),
147 ConvTransposeOptions::new(
148 [self.stride],
149 [self.padding],
150 [self.padding_out],
151 [self.dilation],
152 self.groups,
153 ),
154 )
155 }
156}
157
#[cfg(test)]
mod tests {
    use burn::tensor::ops::FloatElem;
    use burn::tensor::{ElementConversion, Tolerance};

    use super::*;
    use crate::TestBackend;
    use burn::tensor::TensorData;
    type FT = FloatElem<TestBackend>;

    // The default Kaiming-uniform initializer must draw every weight within
    // ±k, where k = sqrt(groups / (channels_out * kernel_size)).
    #[test]
    fn initializer_default() {
        let device = Default::default();
        TestBackend::seed(&device, 0);

        let config = ConvTranspose1dConfig::new([5, 1], 5);
        let k = (config.channels[1] * config.kernel_size) as f64;
        let k = (config.groups as f64 / k).sqrt().elem::<FT>();
        let conv = config.init::<TestBackend>(&Default::default());

        conv.weight.to_data().assert_within_range(-k..k);
    }

    // An explicit `Initializer::Zeros` must produce an all-zero weight tensor.
    #[test]
    fn initializer_zeros() {
        let device = Default::default();
        TestBackend::seed(&device, 0);

        let config = ConvTranspose1dConfig::new([5, 2], 5).with_initializer(Initializer::Zeros);
        let conv = config.init::<TestBackend>(&Default::default());

        assert_eq!(config.initializer, Initializer::Zeros);
        conv.weight.to_data().assert_approx_eq::<f32>(
            &TensorData::zeros::<f32, _>(conv.weight.shape()),
            Tolerance::default(),
        );
    }

    // The custom ModuleDisplay impl renders all hyperparameters (plus the
    // parameter count) on one line.
    #[test]
    fn display() {
        let config = ConvTranspose1dConfig::new([5, 2], 5);
        let conv = config.init::<TestBackend>(&Default::default());

        assert_eq!(
            format!("{conv}"),
            "ConvTranspose1d {channels: [5, 2], stride: 1, kernel_size: 5, dilation: 1, groups: 1, padding: 0, padding_out: 0, params: 52}"
        );
    }

    // Feeding an input whose channel dim (4) differs from channels_in (5)
    // must panic with a descriptive message.
    #[test]
    #[should_panic = "Number of channels in input tensor and input channels of convolution must be equal. got: 4, expected: 5"]
    fn input_channels_mismatch() {
        let config = ConvTranspose1dConfig::new([5, 3], 3);
        let conv = config.init::<TestBackend>(&Default::default());

        let input = Tensor::<TestBackend, 3>::zeros([1, 4, 10], &Default::default());
        let _ = conv.forward(input);
    }
}