use alloc::format;
use crate as burn;
use crate::{
    config::Config,
    module::{Content, DisplaySettings, Ignored, Module, ModuleDisplay, Param},
    nn::{conv::checks, Initializer, PaddingConfig1d},
    tensor::{backend::Backend, module::conv1d, ops::ConvOptions, Tensor},
};
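
/// Configuration to create a [1D convolution](Conv1d) layer using the [init function](Conv1dConfig::init).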
#[derive(Config, Debug)]
pub struct Conv1dConfig {
    /// The number of input channels.
    pub channels_in: usize,
    /// The number of output channels.
    pub channels_out: usize,
    /// The size of the kernel.
    pub kernel_size: usize,
    /// The stride of the convolution.
    #[config(default = "1")]
    pub stride: usize,
    /// Spacing between kernel elements.
    #[config(default = "1")]
    pub dilation: usize,
    /// Controls the connections between input and output channels.
    #[config(default = "1")]
    pub groups: usize,
    /// The padding configuration.
    #[config(default = "PaddingConfig1d::Valid")]
    pub padding: PaddingConfig1d,
    /// If bias should be added to the output.
    #[config(default = true)]
    pub bias: bool,
    /// The type of function used to initialize neural network parameters.
    #[config(
        default = "Initializer::KaimingUniform{gain:1.0/num_traits::Float::sqrt(3.0),fan_out_only:false}"
    )]
    pub initializer: Initializer,
}
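
/// Applies a 1D convolution over input tensors.
///
/// Should be created with [Conv1dConfig].
///
/// A minimal usage sketch (the backend `B`, `device`, and `input` below are
/// assumed to be in scope and are illustrative, not part of this module):
///
/// ```rust,ignore
/// // 3 input channels, 16 output channels, kernel of size 5.
/// let conv = Conv1dConfig::new(3, 16, 5)
///     .with_padding(PaddingConfig1d::Same)
///     .init::<B>(&device);
/// // input: [batch_size, channels_in, length_in] => output: [batch_size, channels_out, length_out]
/// let output = conv.forward(input);
/// ```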
#[derive(Module, Debug)]
#[module(custom_display)]
pub struct Conv1d<B: Backend> {
    /// Tensor of shape `[channels_out, channels_in / groups, kernel_size]`.
    pub weight: Param<Tensor<B, 3>>,
    /// Tensor of shape `[channels_out]`.
    pub bias: Option<Param<Tensor<B, 1>>>,
    /// The stride of the convolution.
    pub stride: usize,
    /// The size of the kernel.
    pub kernel_size: usize,
    /// Spacing between kernel elements.
    pub dilation: usize,
    /// Controls the connections between input and output channels.
    pub groups: usize,
    /// The padding configuration.
    pub padding: Ignored<PaddingConfig1d>,
}

impl<B: Backend> ModuleDisplay for Conv1d<B> {
    fn custom_settings(&self) -> Option<DisplaySettings> {
        DisplaySettings::new()
            .with_new_line_after_attribute(false)
            .optional()
    }

    fn custom_content(&self, content: Content) -> Option<Content> {
        let padding_formatted = format!("{}", &self.padding);

        content
            .add("stride", &self.stride)
            .add("kernel_size", &self.kernel_size)
            .add("dilation", &self.dilation)
            .add("groups", &self.groups)
            .add("padding", &padding_formatted)
            .optional()
    }
}

impl Conv1dConfig {
    /// Initializes a new [Conv1d] module.
    pub fn init<B: Backend>(&self, device: &B::Device) -> Conv1d<B> {
        checks::checks_channels_div_groups(self.channels_in, self.channels_out, self.groups);

        let shape = [
            self.channels_out,
            self.channels_in / self.groups,
            self.kernel_size,
        ];

        // Fan-in of each output unit, used by fan-based initializers
        // such as the default Kaiming uniform.
        let fan_in: usize = self.channels_in / self.groups * self.kernel_size;
        let weight = self
            .initializer
            .init_with(shape, Some(fan_in), None, device);
        let mut bias = None;

        if self.bias {
            bias = Some(
                self.initializer
                    .init_with([self.channels_out], Some(fan_in), None, device),
            );
        }

        Conv1d {
            weight,
            bias,
            stride: self.stride,
            kernel_size: self.kernel_size,
            padding: Ignored(self.padding.clone()),
            dilation: self.dilation,
            groups: self.groups,
        }
    }
}

impl<B: Backend> Conv1d<B> {
    /// Applies the forward pass on the input tensor.
    ///
    /// See [conv1d](crate::tensor::module::conv1d) for more information.
    ///
    /// # Shapes
    ///
    /// - input: `[batch_size, channels_in, length_in]`
    /// - output: `[batch_size, channels_out, length_out]`
    pub fn forward(&self, input: Tensor<B, 3>) -> Tensor<B, 3> {
        let [_batch_size, _channels, length] = input.dims();
        let padding = self
            .padding
            .calculate_padding_1d(length, self.kernel_size, self.stride);

        conv1d(
            input,
            self.weight.val(),
            self.bias.as_ref().map(|bias| bias.val()),
            ConvOptions::new([self.stride], [padding], [self.dilation], self.groups),
        )
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::tensor::TensorData;
    use crate::TestBackend;

    #[test]
    fn initializer_default() {
        TestBackend::seed(0);

        let config = Conv1dConfig::new(5, 5, 5);
        let k = (config.channels_in * config.kernel_size) as f64;
        let k = (config.groups as f64 / k).sqrt() as f32;
        let conv = config.init::<TestBackend>(&Default::default());

        conv.weight.to_data().assert_within_range(-k..k);
    }

    #[test]
    fn initializer_zeros() {
        TestBackend::seed(0);

        let config = Conv1dConfig::new(5, 5, 5).with_initializer(Initializer::Zeros);
        let conv = config.init::<TestBackend>(&Default::default());

        assert_eq!(config.initializer, Initializer::Zeros);
        conv.weight
            .to_data()
            .assert_approx_eq(&TensorData::zeros::<f32, _>(conv.weight.shape()), 3);
    }

    #[test]
    fn display() {
        let config = Conv1dConfig::new(5, 5, 5);
        let conv = config.init::<TestBackend>(&Default::default());

        assert_eq!(
            alloc::format!("{}", conv),
            "Conv1d {stride: 1, kernel_size: 5, dilation: 1, groups: 1, padding: Valid, params: 130}"
        );
    }
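
    // A minimal shape-check sketch, added for illustration; it assumes the
    // default TestBackend device and zero-initialized weights purely for
    // determinism, and is not part of the original test suite.
    #[test]
    fn forward_output_shape() {
        let config = Conv1dConfig::new(2, 4, 3).with_initializer(Initializer::Zeros);
        let conv = config.init::<TestBackend>(&Default::default());

        let input = Tensor::<TestBackend, 3>::zeros([1, 2, 10], &Default::default());
        let output = conv.forward(input);

        // With Valid padding:
        // length_out = (length_in - dilation * (kernel_size - 1) - 1) / stride + 1
        //            = (10 - 1 * (3 - 1) - 1) / 1 + 1 = 8
        assert_eq!(output.dims(), [1, 4, 8]);
    }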
}