use crate as burn;
use crate::config::Config;
use crate::module::Module;
use crate::module::Param;
use crate::nn::Initializer;
use crate::tensor::backend::Backend;
use crate::tensor::Tensor;
use burn_tensor::module::conv1d;
use burn_tensor::ops::conv::calculate_conv_padding;
use burn_tensor::ops::ConvOptions;
use libm::sqrt;
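
/// Configuration to create a [1D convolution](Conv1d) layer.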
#[derive(Config)]
pub struct Conv1dConfig {
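    /// The number of input channels.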
pub channels_in: usize,
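    /// The number of output channels.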
pub channels_out: usize,
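    /// The size of the kernel.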
pub kernel_size: usize,
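    /// The stride of the convolution.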
#[config(default = "1")]
pub stride: usize,
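    /// Spacing between kernel elements.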
#[config(default = "1")]
pub dilation: usize,
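    /// Controls the connections between input and output channels.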
#[config(default = "1")]
pub groups: usize,
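    /// The padding configuration.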
#[config(default = "Conv1dPaddingConfig::Valid")]
pub padding: Conv1dPaddingConfig,
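    /// If a bias should be added to the output.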
#[config(default = true)]
pub bias: bool,
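    /// The initializer used for the weight and bias parameters.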
#[config(default = "Initializer::UniformDefault")]
pub initializer: Initializer,
}
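
/// Padding configuration for the [1D convolution](Conv1d) layer.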
#[derive(Module, Config, Debug)]
pub enum Conv1dPaddingConfig {
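    /// Dynamically calculates the amount of padding needed so the output length matches the input length.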
Same,
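    /// No padding is applied.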
Valid,
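    /// Applies the specified amount of padding.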
Explicit(usize),
}
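
/// Applies a 1D convolution over input tensors.
///
/// # Params
///
/// - weight: Tensor of shape `[channels_out, channels_in, kernel_size]`, initialized by default
///   from a uniform distribution `U(-k, k)` where `k = sqrt(1.0 / (channels_in * kernel_size))`.
/// - bias: Optional tensor of shape `[channels_out]`, initialized from the same distribution.
///
/// A minimal usage sketch (assuming some backend `B` is in scope and `input` is a
/// `Tensor<B, 3>` of shape `[batch_size, channels_in, length]`):
///
/// ```ignore
/// let conv = Conv1dConfig::new(3, 8, 5).init::<B>();
/// let output = conv.forward(input);
/// ```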
#[derive(Module, Debug)]
pub struct Conv1d<B: Backend> {
weight: Param<Tensor<B, 3>>,
bias: Option<Param<Tensor<B, 1>>>,
stride: usize,
kernel_size: usize,
dilation: usize,
groups: usize,
padding: Conv1dPaddingConfig,
}
impl Conv1dConfig {
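    /// Initialize a new [1D convolution](Conv1d) module.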
pub fn init<B: Backend>(&self) -> Conv1d<B> {
let k = (self.channels_in * self.kernel_size) as f64;
let k = sqrt(1.0 / k);
let initializer = if let Initializer::UniformDefault = self.initializer {
Initializer::Uniform(-k, k)
} else {
self.initializer.clone()
};
let weight = initializer.init([self.channels_out, self.channels_in, self.kernel_size]);
let bias = if self.bias {
Some(Param::from(initializer.init([self.channels_out])))
} else {
None
};
Conv1d {
weight: Param::from(weight),
bias,
            stride: self.stride,
            kernel_size: self.kernel_size,
padding: self.padding.clone(),
dilation: self.dilation,
groups: self.groups,
}
}
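
    /// Initialize a new [1D convolution](Conv1d) module from an existing record.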
pub fn init_with<B: Backend>(&self, record: Conv1dRecord<B>) -> Conv1d<B> {
Conv1d {
weight: record.weight,
bias: record.bias,
            stride: self.stride,
            kernel_size: self.kernel_size,
padding: self.padding.clone(),
dilation: self.dilation,
groups: self.groups,
}
}
}
impl<B: Backend> Conv1d<B> {
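    /// Applies the forward pass on the input tensor.
    ///
    /// # Shapes
    ///
    /// - input: `[batch_size, channels_in, length_in]`
    /// - output: `[batch_size, channels_out, length_out]`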
pub fn forward(&self, input: Tensor<B, 3>) -> Tensor<B, 3> {
let same_padding = || {
let [_batch_size, _channels_in, length] = input.dims();
calculate_conv_padding(self.kernel_size, self.stride, length, length)
};
let padding = match &self.padding {
Conv1dPaddingConfig::Valid => 0,
Conv1dPaddingConfig::Same => same_padding(),
Conv1dPaddingConfig::Explicit(value) => *value,
};
conv1d(
input,
self.weight.val(),
self.bias.as_ref().map(|bias| bias.val()),
ConvOptions::new([self.stride], [padding], [self.dilation], self.groups),
)
}
}
#[cfg(test)]
mod tests {
use burn_tensor::Data;
use super::*;
use crate::TestBackend;
#[test]
fn initializer_default() {
TestBackend::seed(0);
let config = Conv1dConfig::new(5, 5, 5);
let k = (config.channels_in * config.kernel_size) as f64;
let k = sqrt(1.0 / k) as f32;
let conv = config.init::<TestBackend>();
assert_eq!(config.initializer, Initializer::UniformDefault);
conv.weight.to_data().assert_in_range(-k, k);
}
#[test]
fn initializer_zeros() {
TestBackend::seed(0);
let config = Conv1dConfig::new(5, 5, 5).with_initializer(Initializer::Zeros);
let conv = config.init::<TestBackend>();
assert_eq!(config.initializer, Initializer::Zeros);
conv.weight
.to_data()
.assert_approx_eq(&Data::zeros(conv.weight.shape()), 3);
}
}