Trait burn_tensor::ops::ModuleOps
pub trait ModuleOps<B: Backend> {
    // Required methods
    fn conv2d(
        x: FloatTensor<B, 4>,
        weight: FloatTensor<B, 4>,
        bias: Option<FloatTensor<B, 1>>,
        options: ConvOptions<2>
    ) -> FloatTensor<B, 4>;
    fn conv_transpose2d(
        x: FloatTensor<B, 4>,
        weight: FloatTensor<B, 4>,
        bias: Option<FloatTensor<B, 1>>,
        options: ConvTransposeOptions<2>
    ) -> FloatTensor<B, 4>;
    fn avg_pool2d(
        x: FloatTensor<B, 4>,
        kernel_size: [usize; 2],
        stride: [usize; 2],
        padding: [usize; 2],
        count_include_pad: bool
    ) -> FloatTensor<B, 4>;
    fn avg_pool2d_backward(
        x: FloatTensor<B, 4>,
        grad: FloatTensor<B, 4>,
        kernel_size: [usize; 2],
        stride: [usize; 2],
        padding: [usize; 2],
        count_include_pad: bool
    ) -> FloatTensor<B, 4>;
    fn adaptive_avg_pool2d(
        x: FloatTensor<B, 4>,
        output_size: [usize; 2]
    ) -> FloatTensor<B, 4>;
    fn adaptive_avg_pool2d_backward(
        x: FloatTensor<B, 4>,
        grad: FloatTensor<B, 4>
    ) -> FloatTensor<B, 4>;
    fn max_pool2d(
        x: FloatTensor<B, 4>,
        kernel_size: [usize; 2],
        stride: [usize; 2],
        padding: [usize; 2],
        dilation: [usize; 2]
    ) -> FloatTensor<B, 4>;
    fn max_pool2d_with_indices(
        x: FloatTensor<B, 4>,
        kernel_size: [usize; 2],
        stride: [usize; 2],
        padding: [usize; 2],
        dilation: [usize; 2]
    ) -> MaxPool2dWithIndices<B>;
    fn max_pool2d_with_indices_backward(
        x: FloatTensor<B, 4>,
        kernel_size: [usize; 2],
        stride: [usize; 2],
        padding: [usize; 2],
        dilation: [usize; 2],
        output_grad: FloatTensor<B, 4>,
        indices: IntTensor<B, 4>
    ) -> MaxPool2dBackward<B>;
    fn interpolate(
        x: FloatTensor<B, 4>,
        output_size: [usize; 2],
        options: InterpolateOptions
    ) -> FloatTensor<B, 4>;
    fn interpolate_backward(
        x: FloatTensor<B, 4>,
        grad: FloatTensor<B, 4>,
        output_size: [usize; 2],
        options: InterpolateOptions
    ) -> FloatTensor<B, 4>;

    // Provided methods
    fn embedding(
        weights: FloatTensor<B, 2>,
        indices: IntTensor<B, 2>
    ) -> FloatTensor<B, 3> { ... }
    fn embedding_backward(
        weights: FloatTensor<B, 2>,
        output_grad: FloatTensor<B, 3>,
        indices: IntTensor<B, 2>
    ) -> FloatTensor<B, 2> { ... }
    fn conv1d(
        x: FloatTensor<B, 3>,
        weight: FloatTensor<B, 3>,
        bias: Option<FloatTensor<B, 1>>,
        options: ConvOptions<1>
    ) -> FloatTensor<B, 3> { ... }
    fn conv1d_backward(
        x: FloatTensor<B, 3>,
        weight: FloatTensor<B, 3>,
        bias: Option<FloatTensor<B, 1>>,
        output_grad: FloatTensor<B, 3>,
        options: ConvOptions<1>
    ) -> Conv1dBackward<B> { ... }
    fn conv2d_backward(
        x: FloatTensor<B, 4>,
        weight: FloatTensor<B, 4>,
        bias: Option<FloatTensor<B, 1>>,
        output_grad: FloatTensor<B, 4>,
        options: ConvOptions<2>
    ) -> Conv2dBackward<B> { ... }
    fn conv_transpose1d(
        x: FloatTensor<B, 3>,
        weight: FloatTensor<B, 3>,
        bias: Option<FloatTensor<B, 1>>,
        options: ConvTransposeOptions<1>
    ) -> FloatTensor<B, 3> { ... }
    fn conv_transpose1d_backward(
        x: FloatTensor<B, 3>,
        weight: FloatTensor<B, 3>,
        bias: Option<FloatTensor<B, 1>>,
        output_grad: FloatTensor<B, 3>,
        options: ConvTransposeOptions<1>
    ) -> Conv1dBackward<B> { ... }
    fn conv_transpose2d_backward(
        x: FloatTensor<B, 4>,
        weight: FloatTensor<B, 4>,
        bias: Option<FloatTensor<B, 1>>,
        output_grad: FloatTensor<B, 4>,
        options: ConvTransposeOptions<2>
    ) -> Conv2dBackward<B> { ... }
    fn unfold4d(
        x: FloatTensor<B, 4>,
        kernel_size: [usize; 2],
        options: UnfoldOptions
    ) -> FloatTensor<B, 3> { ... }
    fn avg_pool1d(
        x: FloatTensor<B, 3>,
        kernel_size: usize,
        stride: usize,
        padding: usize,
        count_include_pad: bool
    ) -> FloatTensor<B, 3> { ... }
    fn avg_pool1d_backward(
        x: FloatTensor<B, 3>,
        grad: FloatTensor<B, 3>,
        kernel_size: usize,
        stride: usize,
        padding: usize,
        count_include_pad: bool
    ) -> FloatTensor<B, 3> { ... }
    fn adaptive_avg_pool1d(
        x: FloatTensor<B, 3>,
        output_size: usize
    ) -> FloatTensor<B, 3> { ... }
    fn adaptive_avg_pool1d_backward(
        x: FloatTensor<B, 3>,
        grad: FloatTensor<B, 3>
    ) -> FloatTensor<B, 3> { ... }
    fn max_pool1d(
        x: FloatTensor<B, 3>,
        kernel_size: usize,
        stride: usize,
        padding: usize,
        dilation: usize
    ) -> FloatTensor<B, 3> { ... }
    fn max_pool1d_with_indices(
        x: FloatTensor<B, 3>,
        kernel_size: usize,
        stride: usize,
        padding: usize,
        dilation: usize
    ) -> MaxPool1dWithIndices<B> { ... }
    fn max_pool1d_with_indices_backward(
        x: FloatTensor<B, 3>,
        kernel_size: usize,
        stride: usize,
        padding: usize,
        dilation: usize,
        output_grad: FloatTensor<B, 3>,
        indices: IntTensor<B, 3>
    ) -> MaxPool1dBackward<B> { ... }
}
Module operations trait.
Required Methods
fn conv2d(
    x: FloatTensor<B, 4>,
    weight: FloatTensor<B, 4>,
    bias: Option<FloatTensor<B, 1>>,
    options: ConvOptions<2>
) -> FloatTensor<B, 4>

Two dimensional convolution.

Shapes

x:      [batch_size, channels_in, height, width]
weight: [channels_out, channels_in, kernel_size_1, kernel_size_2]
bias:   [channels_out]
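The output spatial size follows the usual convolution arithmetic from the kernel size together with the stride, padding, and dilation carried by ConvOptions. A minimal sketch of that formula in plain Rust, assuming the standard convention (this is an illustration, not part of the trait):

```rust
/// Expected output size along one spatial axis, assuming the conventional
/// formula: floor((in + 2p - d*(k - 1) - 1) / s) + 1.
fn conv_out_size(input: usize, kernel: usize, stride: usize, padding: usize, dilation: usize) -> usize {
    (input + 2 * padding - dilation * (kernel - 1) - 1) / stride + 1
}

fn main() {
    // A 3x3 kernel with stride 1 and padding 1 preserves the spatial size.
    assert_eq!(conv_out_size(224, 3, 1, 1, 1), 224);
    // Stride 2 halves it (rounding down).
    assert_eq!(conv_out_size(224, 3, 2, 1, 1), 112);
}
```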
fn conv_transpose2d(
    x: FloatTensor<B, 4>,
    weight: FloatTensor<B, 4>,
    bias: Option<FloatTensor<B, 1>>,
    options: ConvTransposeOptions<2>
) -> FloatTensor<B, 4>

Two dimensional transposed convolution.

Shapes

x:      [batch_size, channels_in, height, width]
weight: [channels_in, channels_out, kernel_size_1, kernel_size_2]
bias:   [channels_out]
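Transposed convolution inverts the shape arithmetic, so the output grows with the stride. A minimal sketch of the usual formula; `output_padding` here stands in for the corresponding output-padding field of ConvTransposeOptions (an assumption about the convention, not the backend implementation):

```rust
/// Expected output size along one spatial axis, assuming the usual formula:
/// (in - 1)*s - 2p + d*(k - 1) + output_padding + 1.
fn conv_transpose_out_size(
    input: usize, kernel: usize, stride: usize,
    padding: usize, output_padding: usize, dilation: usize,
) -> usize {
    (input - 1) * stride + dilation * (kernel - 1) + output_padding + 1 - 2 * padding
}

fn main() {
    // Stride 2, kernel 4, padding 1: the classic "double the resolution" setup.
    assert_eq!(conv_transpose_out_size(56, 4, 2, 1, 0, 1), 112);
}
```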
fn avg_pool2d(
    x: FloatTensor<B, 4>,
    kernel_size: [usize; 2],
    stride: [usize; 2],
    padding: [usize; 2],
    count_include_pad: bool
) -> FloatTensor<B, 4>

Two dimensional avg pooling.
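The `count_include_pad` flag controls whether zero-padded positions count toward the averaging divisor. A small illustration of that difference for a single window (illustrative only, not the backend implementation):

```rust
/// Average over one pooling window, where `valid` is the number of in-bounds
/// elements and `window` the full kernel size (padded positions contribute 0.0).
fn window_avg(sum: f32, valid: usize, window: usize, count_include_pad: bool) -> f32 {
    let divisor = if count_include_pad { window } else { valid };
    sum / divisor as f32
}

fn main() {
    // A window of size 3 that overlaps the padding with only 2 real elements.
    assert_eq!(window_avg(4.0, 2, 3, false), 2.0);
    assert!((window_avg(4.0, 2, 3, true) - 4.0 / 3.0).abs() < 1e-6);
}
```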
fn avg_pool2d_backward(
    x: FloatTensor<B, 4>,
    grad: FloatTensor<B, 4>,
    kernel_size: [usize; 2],
    stride: [usize; 2],
    padding: [usize; 2],
    count_include_pad: bool
) -> FloatTensor<B, 4>

Backward pass for the avg pooling 2d operation.
fn adaptive_avg_pool2d(
    x: FloatTensor<B, 4>,
    output_size: [usize; 2]
) -> FloatTensor<B, 4>

Two dimensional adaptive avg pooling.
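Adaptive pooling derives its windows from the input and output sizes instead of explicit kernel and stride values. A sketch of the window bounds under the commonly used convention (an assumption, mirroring PyTorch-style adaptive pooling):

```rust
/// Window [start, end) along one axis for output index `i`, assuming
/// start = floor(i*in/out) and end = ceil((i+1)*in/out).
fn adaptive_window(i: usize, in_size: usize, out_size: usize) -> (usize, usize) {
    let start = (i * in_size) / out_size;
    let end = ((i + 1) * in_size + out_size - 1) / out_size; // ceiling division
    (start, end)
}

fn main() {
    // Pooling 7 elements down to 3 outputs: windows overlap where needed.
    assert_eq!(adaptive_window(0, 7, 3), (0, 3));
    assert_eq!(adaptive_window(1, 7, 3), (2, 5));
    assert_eq!(adaptive_window(2, 7, 3), (4, 7));
}
```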
fn adaptive_avg_pool2d_backward(
    x: FloatTensor<B, 4>,
    grad: FloatTensor<B, 4>
) -> FloatTensor<B, 4>

Backward pass for the adaptive avg pooling 2d operation.
fn max_pool2d(
    x: FloatTensor<B, 4>,
    kernel_size: [usize; 2],
    stride: [usize; 2],
    padding: [usize; 2],
    dilation: [usize; 2]
) -> FloatTensor<B, 4>

Two dimensional max pooling.
fn max_pool2d_with_indices(
    x: FloatTensor<B, 4>,
    kernel_size: [usize; 2],
    stride: [usize; 2],
    padding: [usize; 2],
    dilation: [usize; 2]
) -> MaxPool2dWithIndices<B>

Two dimensional max pooling with indices.
fn max_pool2d_with_indices_backward(
    x: FloatTensor<B, 4>,
    kernel_size: [usize; 2],
    stride: [usize; 2],
    padding: [usize; 2],
    dilation: [usize; 2],
    output_grad: FloatTensor<B, 4>,
    indices: IntTensor<B, 4>
) -> MaxPool2dBackward<B>

Backward pass for the max pooling 2d operation.
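Since the forward pass recorded where each maximum came from, the backward pass reduces to a scatter-add of `output_grad` at those positions. A minimal sketch over flattened buffers, assuming `indices` holds flat offsets into the input (an illustration of the idea, not the backend kernel):

```rust
/// Route each output gradient back to the input position that produced the max.
fn max_pool_backward(input_len: usize, output_grad: &[f32], indices: &[usize]) -> Vec<f32> {
    let mut x_grad = vec![0.0f32; input_len];
    for (&grad, &idx) in output_grad.iter().zip(indices.iter()) {
        x_grad[idx] += grad; // overlapping windows accumulate
    }
    x_grad
}

fn main() {
    // Two output positions both selected input element 2.
    let x_grad = max_pool_backward(6, &[1.0, 0.5], &[2, 2]);
    assert_eq!(x_grad, vec![0.0, 0.0, 1.5, 0.0, 0.0, 0.0]);
}
```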
fn interpolate(
    x: FloatTensor<B, 4>,
    output_size: [usize; 2],
    options: InterpolateOptions
) -> FloatTensor<B, 4>

Down/up samples the input to the given output size.
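For a nearest-neighbor mode, the sampling reduces to an index mapping from output to input coordinates. A sketch of one common convention (an assumption; the exact rule depends on the mode selected in InterpolateOptions):

```rust
/// Nearest-neighbor source index for output position `dst`, assuming the
/// common convention src = floor(dst * in / out), clamped to the input range.
fn nearest_src(dst: usize, in_size: usize, out_size: usize) -> usize {
    ((dst * in_size) / out_size).min(in_size - 1)
}

fn main() {
    // Upsampling 4 -> 8: each input element is repeated twice.
    let mapped: Vec<usize> = (0..8).map(|d| nearest_src(d, 4, 8)).collect();
    assert_eq!(mapped, vec![0, 0, 1, 1, 2, 2, 3, 3]);
}
```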
fn interpolate_backward(
    x: FloatTensor<B, 4>,
    grad: FloatTensor<B, 4>,
    output_size: [usize; 2],
    options: InterpolateOptions
) -> FloatTensor<B, 4>

Backward pass for the interpolate operation.
Provided Methods
fn embedding(
    weights: FloatTensor<B, 2>,
    indices: IntTensor<B, 2>
) -> FloatTensor<B, 3>

Embedding operation mapping indices to a vector space.
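Conceptually this is a row gather: every index selects one row of `weights`, giving an output of shape [batch_size, seq_length, d_model]. An illustrative sketch on nested vectors (not the backend implementation):

```rust
/// Gather rows of `weights` ([vocab, d_model]) according to `indices`
/// ([batch, seq]), producing [batch, seq, d_model].
fn embedding(weights: &[Vec<f32>], indices: &[Vec<usize>]) -> Vec<Vec<Vec<f32>>> {
    indices
        .iter()
        .map(|seq| seq.iter().map(|&i| weights[i].clone()).collect())
        .collect()
}

fn main() {
    let weights = vec![vec![0.0, 0.0], vec![1.0, 2.0], vec![3.0, 4.0]];
    let out = embedding(&weights, &[vec![2, 1]]);
    assert_eq!(out, vec![vec![vec![3.0, 4.0], vec![1.0, 2.0]]]);
}
```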
fn embedding_backward(
    weights: FloatTensor<B, 2>,
    output_grad: FloatTensor<B, 3>,
    indices: IntTensor<B, 2>
) -> FloatTensor<B, 2>

Backward pass for the embedding operation.
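The backward pass scatters each output-gradient row back onto the weight row it was gathered from, accumulating where an index repeats. An illustrative sketch, again on nested vectors:

```rust
/// Accumulate output_grad ([batch][seq][d_model]) into a [vocab][d_model] weight gradient.
fn embedding_backward(
    vocab: usize,
    d_model: usize,
    output_grad: &[Vec<Vec<f32>>],
    indices: &[Vec<usize>],
) -> Vec<Vec<f32>> {
    let mut grad = vec![vec![0.0f32; d_model]; vocab];
    for (batch_grad, batch_idx) in output_grad.iter().zip(indices.iter()) {
        for (row, &i) in batch_grad.iter().zip(batch_idx.iter()) {
            for (g, r) in grad[i].iter_mut().zip(row.iter()) {
                *g += *r;
            }
        }
    }
    grad
}

fn main() {
    // Index 2 appears twice, so its row accumulates both gradients.
    let grad = embedding_backward(3, 1, &[vec![vec![1.0], vec![0.5]]], &[vec![2, 2]]);
    assert_eq!(grad, vec![vec![0.0], vec![0.0], vec![1.5]]);
}
```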
fn conv1d(
    x: FloatTensor<B, 3>,
    weight: FloatTensor<B, 3>,
    bias: Option<FloatTensor<B, 1>>,
    options: ConvOptions<1>
) -> FloatTensor<B, 3>

One dimensional convolution.

Shapes

x:      [batch_size, channels_in, length]
weight: [channels_out, channels_in, kernel_size]
bias:   [channels_out]
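To make the shape bookkeeping concrete, here is a naive single-example sketch with unit stride, no padding, no dilation, and a single group. It is a reference illustration only, not the backend kernel:

```rust
/// Naive conv1d for one example: x is [channels_in][length],
/// weight is [channels_out][channels_in][kernel_size], bias is [channels_out].
fn conv1d_naive(x: &[Vec<f32>], weight: &[Vec<Vec<f32>>], bias: &[f32]) -> Vec<Vec<f32>> {
    let length = x[0].len();
    let kernel = weight[0][0].len();
    let out_len = length - kernel + 1;
    weight
        .iter()
        .zip(bias)
        .map(|(w_out, &b)| {
            (0..out_len)
                .map(|t| {
                    let mut acc = b;
                    for (w_in, x_in) in w_out.iter().zip(x) {
                        for k in 0..kernel {
                            acc += w_in[k] * x_in[t + k];
                        }
                    }
                    acc
                })
                .collect()
        })
        .collect()
}

fn main() {
    // One input channel of length 4, one output channel with kernel size 2.
    let x = vec![vec![1.0, 2.0, 3.0, 4.0]];
    let w = vec![vec![vec![1.0, 1.0]]]; // moving sum
    let y = conv1d_naive(&x, &w, &[0.0]);
    assert_eq!(y, vec![vec![3.0, 5.0, 7.0]]);
}
```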
fn conv1d_backward(
    x: FloatTensor<B, 3>,
    weight: FloatTensor<B, 3>,
    bias: Option<FloatTensor<B, 1>>,
    output_grad: FloatTensor<B, 3>,
    options: ConvOptions<1>
) -> Conv1dBackward<B>

Backward pass for the conv1d operation.
fn conv2d_backward(
    x: FloatTensor<B, 4>,
    weight: FloatTensor<B, 4>,
    bias: Option<FloatTensor<B, 1>>,
    output_grad: FloatTensor<B, 4>,
    options: ConvOptions<2>
) -> Conv2dBackward<B>

Backward pass for the conv2d operation.
fn conv_transpose1d(
    x: FloatTensor<B, 3>,
    weight: FloatTensor<B, 3>,
    bias: Option<FloatTensor<B, 1>>,
    options: ConvTransposeOptions<1>
) -> FloatTensor<B, 3>

One dimensional transposed convolution.

Shapes

x:      [batch_size, channels_in, length]
weight: [channels_in, channels_out, kernel_size]
bias:   [channels_out]
fn conv_transpose1d_backward(
    x: FloatTensor<B, 3>,
    weight: FloatTensor<B, 3>,
    bias: Option<FloatTensor<B, 1>>,
    output_grad: FloatTensor<B, 3>,
    options: ConvTransposeOptions<1>
) -> Conv1dBackward<B>

Backward pass for the conv transpose 1d operation.
fn conv_transpose2d_backward(
    x: FloatTensor<B, 4>,
    weight: FloatTensor<B, 4>,
    bias: Option<FloatTensor<B, 1>>,
    output_grad: FloatTensor<B, 4>,
    options: ConvTransposeOptions<2>
) -> Conv2dBackward<B>

Backward pass for the conv transpose 2d operation.
fn unfold4d(
    x: FloatTensor<B, 4>,
    kernel_size: [usize; 2],
    options: UnfoldOptions
) -> FloatTensor<B, 3>

Four-dimensional unfolding.

Shapes

x:       [batch_size, channels_in, height, width]
returns: [batch_size, channels_in * kernel_size_1 * kernel_size_2, number_of_blocks]
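The number of blocks is the product of the per-axis sliding-window counts, which follow the same arithmetic as convolution; UnfoldOptions is assumed here to carry stride, padding, and dilation in the usual sense. A sketch:

```rust
/// Number of sliding-window positions along one axis, per the usual formula.
fn windows(input: usize, kernel: usize, stride: usize, padding: usize, dilation: usize) -> usize {
    (input + 2 * padding - dilation * (kernel - 1) - 1) / stride + 1
}

fn main() {
    // 8x8 input, 3x3 kernel, stride 1, no padding or dilation:
    // 6 positions per axis, so 36 blocks of channels_in * 9 values each.
    let blocks = windows(8, 3, 1, 0, 1) * windows(8, 3, 1, 0, 1);
    assert_eq!(blocks, 36);
}
```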
fn avg_pool1d(
    x: FloatTensor<B, 3>,
    kernel_size: usize,
    stride: usize,
    padding: usize,
    count_include_pad: bool
) -> FloatTensor<B, 3>

One dimensional avg pooling.
fn avg_pool1d_backward(
    x: FloatTensor<B, 3>,
    grad: FloatTensor<B, 3>,
    kernel_size: usize,
    stride: usize,
    padding: usize,
    count_include_pad: bool
) -> FloatTensor<B, 3>

Backward pass for the avg pooling 1d operation.
fn adaptive_avg_pool1d(
    x: FloatTensor<B, 3>,
    output_size: usize
) -> FloatTensor<B, 3>

One dimensional adaptive avg pooling.
fn adaptive_avg_pool1d_backward(
    x: FloatTensor<B, 3>,
    grad: FloatTensor<B, 3>
) -> FloatTensor<B, 3>

Backward pass for the adaptive avg pooling 1d operation.
fn max_pool1d(
    x: FloatTensor<B, 3>,
    kernel_size: usize,
    stride: usize,
    padding: usize,
    dilation: usize
) -> FloatTensor<B, 3>

One dimensional max pooling.
fn max_pool1d_with_indices(
    x: FloatTensor<B, 3>,
    kernel_size: usize,
    stride: usize,
    padding: usize,
    dilation: usize
) -> MaxPool1dWithIndices<B>

One dimensional max pooling with indices.
fn max_pool1d_with_indices_backward(
    x: FloatTensor<B, 3>,
    kernel_size: usize,
    stride: usize,
    padding: usize,
    dilation: usize,
    output_grad: FloatTensor<B, 3>,
    indices: IntTensor<B, 3>
) -> MaxPool1dBackward<B>

Backward pass for the max pooling 1d operation.