Trait burn_tensor::ops::ModuleOps

source ·
pub trait ModuleOps<B: Backend> {
Show 31 methods // Required methods fn conv2d( x: FloatTensor<B, 4>, weight: FloatTensor<B, 4>, bias: Option<FloatTensor<B, 1>>, options: ConvOptions<2>, ) -> FloatTensor<B, 4>; fn conv3d( x: FloatTensor<B, 5>, weight: FloatTensor<B, 5>, bias: Option<FloatTensor<B, 1>>, options: ConvOptions<3>, ) -> FloatTensor<B, 5>; fn conv_transpose2d( x: FloatTensor<B, 4>, weight: FloatTensor<B, 4>, bias: Option<FloatTensor<B, 1>>, options: ConvTransposeOptions<2>, ) -> FloatTensor<B, 4>; fn conv_transpose3d( x: FloatTensor<B, 5>, weight: FloatTensor<B, 5>, bias: Option<FloatTensor<B, 1>>, options: ConvTransposeOptions<3>, ) -> FloatTensor<B, 5>; fn avg_pool2d( x: FloatTensor<B, 4>, kernel_size: [usize; 2], stride: [usize; 2], padding: [usize; 2], count_include_pad: bool, ) -> FloatTensor<B, 4>; fn avg_pool2d_backward( x: FloatTensor<B, 4>, grad: FloatTensor<B, 4>, kernel_size: [usize; 2], stride: [usize; 2], padding: [usize; 2], count_include_pad: bool, ) -> FloatTensor<B, 4>; fn adaptive_avg_pool2d( x: FloatTensor<B, 4>, output_size: [usize; 2], ) -> FloatTensor<B, 4>; fn adaptive_avg_pool2d_backward( x: FloatTensor<B, 4>, grad: FloatTensor<B, 4>, ) -> FloatTensor<B, 4>; fn max_pool2d( x: FloatTensor<B, 4>, kernel_size: [usize; 2], stride: [usize; 2], padding: [usize; 2], dilation: [usize; 2], ) -> FloatTensor<B, 4>; fn max_pool2d_with_indices( x: FloatTensor<B, 4>, kernel_size: [usize; 2], stride: [usize; 2], padding: [usize; 2], dilation: [usize; 2], ) -> MaxPool2dWithIndices<B>; fn max_pool2d_with_indices_backward( x: FloatTensor<B, 4>, kernel_size: [usize; 2], stride: [usize; 2], padding: [usize; 2], dilation: [usize; 2], output_grad: FloatTensor<B, 4>, indices: IntTensor<B, 4>, ) -> MaxPool2dBackward<B>; fn interpolate( x: FloatTensor<B, 4>, output_size: [usize; 2], options: InterpolateOptions, ) -> FloatTensor<B, 4>; fn interpolate_backward( x: FloatTensor<B, 4>, grad: FloatTensor<B, 4>, output_size: [usize; 2], options: InterpolateOptions, ) -> FloatTensor<B, 4>; // 
Provided methods fn embedding( weights: FloatTensor<B, 2>, indices: IntTensor<B, 2>, ) -> FloatTensor<B, 3> { ... } fn embedding_backward( weights: FloatTensor<B, 2>, output_grad: FloatTensor<B, 3>, indices: IntTensor<B, 2>, ) -> FloatTensor<B, 2> { ... } fn conv1d( x: FloatTensor<B, 3>, weight: FloatTensor<B, 3>, bias: Option<FloatTensor<B, 1>>, options: ConvOptions<1>, ) -> FloatTensor<B, 3> { ... } fn conv1d_backward( x: FloatTensor<B, 3>, weight: FloatTensor<B, 3>, bias: Option<FloatTensor<B, 1>>, output_grad: FloatTensor<B, 3>, options: ConvOptions<1>, ) -> Conv1dBackward<B> { ... } fn conv2d_backward( x: FloatTensor<B, 4>, weight: FloatTensor<B, 4>, bias: Option<FloatTensor<B, 1>>, output_grad: FloatTensor<B, 4>, options: ConvOptions<2>, ) -> Conv2dBackward<B> { ... } fn conv3d_backward( x: FloatTensor<B, 5>, weight: FloatTensor<B, 5>, bias: Option<FloatTensor<B, 1>>, output_grad: FloatTensor<B, 5>, options: ConvOptions<3>, ) -> Conv3dBackward<B> { ... } fn conv_transpose1d( x: FloatTensor<B, 3>, weight: FloatTensor<B, 3>, bias: Option<FloatTensor<B, 1>>, options: ConvTransposeOptions<1>, ) -> FloatTensor<B, 3> { ... } fn conv_transpose1d_backward( x: FloatTensor<B, 3>, weight: FloatTensor<B, 3>, bias: Option<FloatTensor<B, 1>>, output_grad: FloatTensor<B, 3>, options: ConvTransposeOptions<1>, ) -> Conv1dBackward<B> { ... } fn conv_transpose2d_backward( x: FloatTensor<B, 4>, weight: FloatTensor<B, 4>, bias: Option<FloatTensor<B, 1>>, output_grad: FloatTensor<B, 4>, options: ConvTransposeOptions<2>, ) -> Conv2dBackward<B> { ... } fn conv_transpose3d_backward( x: FloatTensor<B, 5>, weight: FloatTensor<B, 5>, bias: Option<FloatTensor<B, 1>>, output_grad: FloatTensor<B, 5>, options: ConvTransposeOptions<3>, ) -> Conv3dBackward<B> { ... } fn unfold4d( x: FloatTensor<B, 4>, kernel_size: [usize; 2], options: UnfoldOptions, ) -> FloatTensor<B, 3> { ... 
} fn avg_pool1d( x: FloatTensor<B, 3>, kernel_size: usize, stride: usize, padding: usize, count_include_pad: bool, ) -> FloatTensor<B, 3> { ... } fn avg_pool1d_backward( x: FloatTensor<B, 3>, grad: FloatTensor<B, 3>, kernel_size: usize, stride: usize, padding: usize, count_include_pad: bool, ) -> FloatTensor<B, 3> { ... } fn adaptive_avg_pool1d( x: FloatTensor<B, 3>, output_size: usize, ) -> FloatTensor<B, 3> { ... } fn adaptive_avg_pool1d_backward( x: FloatTensor<B, 3>, grad: FloatTensor<B, 3>, ) -> FloatTensor<B, 3> { ... } fn max_pool1d( x: FloatTensor<B, 3>, kernel_size: usize, stride: usize, padding: usize, dilation: usize, ) -> FloatTensor<B, 3> { ... } fn max_pool1d_with_indices( x: FloatTensor<B, 3>, kernel_size: usize, stride: usize, padding: usize, dilation: usize, ) -> MaxPool1dWithIndices<B> { ... } fn max_pool1d_with_indices_backward( x: FloatTensor<B, 3>, kernel_size: usize, stride: usize, padding: usize, dilation: usize, output_grad: FloatTensor<B, 3>, indices: IntTensor<B, 3>, ) -> MaxPool1dBackward<B> { ... }
}
Expand description

Module operations trait.

Required Methods§

source

fn conv2d( x: FloatTensor<B, 4>, weight: FloatTensor<B, 4>, bias: Option<FloatTensor<B, 1>>, options: ConvOptions<2>, ) -> FloatTensor<B, 4>

Two dimensional convolution.

§Shapes

x: [batch_size, channels_in, height, width], weight: [channels_out, channels_in, kernel_size_1, kernel_size_2], bias: [channels_out],

source

fn conv3d( x: FloatTensor<B, 5>, weight: FloatTensor<B, 5>, bias: Option<FloatTensor<B, 1>>, options: ConvOptions<3>, ) -> FloatTensor<B, 5>

Three dimensional convolution.

§Shapes

x: [batch_size, channels_in, depth, height, width], weight: [channels_out, channels_in, kernel_size_1, kernel_size_2, kernel_size_3], bias: [channels_out],

source

fn conv_transpose2d( x: FloatTensor<B, 4>, weight: FloatTensor<B, 4>, bias: Option<FloatTensor<B, 1>>, options: ConvTransposeOptions<2>, ) -> FloatTensor<B, 4>

Two dimensional transposed convolution.

§Shapes

x: [batch_size, channels_in, height, width], weight: [channels_in, channels_out, kernel_size_1, kernel_size_2], bias: [channels_out],

source

fn conv_transpose3d( x: FloatTensor<B, 5>, weight: FloatTensor<B, 5>, bias: Option<FloatTensor<B, 1>>, options: ConvTransposeOptions<3>, ) -> FloatTensor<B, 5>

Three dimensional transposed convolution.

§Shapes

x: [batch_size, channels_in, depth, height, width], weight: [channels_in, channels_out, kernel_size_1, kernel_size_2, kernel_size_3], bias: [channels_out],

source

fn avg_pool2d( x: FloatTensor<B, 4>, kernel_size: [usize; 2], stride: [usize; 2], padding: [usize; 2], count_include_pad: bool, ) -> FloatTensor<B, 4>

Two dimensional avg pooling.

§Shapes

x: [batch_size, channels, height, width],

source

fn avg_pool2d_backward( x: FloatTensor<B, 4>, grad: FloatTensor<B, 4>, kernel_size: [usize; 2], stride: [usize; 2], padding: [usize; 2], count_include_pad: bool, ) -> FloatTensor<B, 4>

Backward pass for the avg pooling 2d operation.

source

fn adaptive_avg_pool2d( x: FloatTensor<B, 4>, output_size: [usize; 2], ) -> FloatTensor<B, 4>

Two dimensional adaptive avg pooling.

§Shapes

x: [batch_size, channels, height, width],

source

fn adaptive_avg_pool2d_backward( x: FloatTensor<B, 4>, grad: FloatTensor<B, 4>, ) -> FloatTensor<B, 4>

Backward pass for the adaptive avg pooling 2d operation.

source

fn max_pool2d( x: FloatTensor<B, 4>, kernel_size: [usize; 2], stride: [usize; 2], padding: [usize; 2], dilation: [usize; 2], ) -> FloatTensor<B, 4>

Two dimensional max pooling.

§Shapes

x: [batch_size, channels, height, width],

source

fn max_pool2d_with_indices( x: FloatTensor<B, 4>, kernel_size: [usize; 2], stride: [usize; 2], padding: [usize; 2], dilation: [usize; 2], ) -> MaxPool2dWithIndices<B>

Two dimensional max pooling with indices.

§Shapes

x: [batch_size, channels, height, width],

source

fn max_pool2d_with_indices_backward( x: FloatTensor<B, 4>, kernel_size: [usize; 2], stride: [usize; 2], padding: [usize; 2], dilation: [usize; 2], output_grad: FloatTensor<B, 4>, indices: IntTensor<B, 4>, ) -> MaxPool2dBackward<B>

Backward pass for the max pooling 2d operation.

source

fn interpolate( x: FloatTensor<B, 4>, output_size: [usize; 2], options: InterpolateOptions, ) -> FloatTensor<B, 4>

Down/up samples the input.

§Shapes

x: [batch_size, channels, height, width],

source

fn interpolate_backward( x: FloatTensor<B, 4>, grad: FloatTensor<B, 4>, output_size: [usize; 2], options: InterpolateOptions, ) -> FloatTensor<B, 4>

Backward pass for the interpolate operation.

Provided Methods§

source

fn embedding( weights: FloatTensor<B, 2>, indices: IntTensor<B, 2>, ) -> FloatTensor<B, 3>

Embedding operation.

§Arguments
  • weights - The embedding weights.
  • indices - The indices tensor.
§Returns

The output tensor.

source

fn embedding_backward( weights: FloatTensor<B, 2>, output_grad: FloatTensor<B, 3>, indices: IntTensor<B, 2>, ) -> FloatTensor<B, 2>

Embedding backward operation.

§Arguments
  • weights - The embedding weights.
  • output_grad - The output gradient.
  • indices - The indices tensor.
§Returns

The gradient.

source

fn conv1d( x: FloatTensor<B, 3>, weight: FloatTensor<B, 3>, bias: Option<FloatTensor<B, 1>>, options: ConvOptions<1>, ) -> FloatTensor<B, 3>

One dimensional convolution.

§Shapes

x: [batch_size, channels_in, length], weight: [channels_out, channels_in, kernel_size], bias: [channels_out],

source

fn conv1d_backward( x: FloatTensor<B, 3>, weight: FloatTensor<B, 3>, bias: Option<FloatTensor<B, 1>>, output_grad: FloatTensor<B, 3>, options: ConvOptions<1>, ) -> Conv1dBackward<B>

Backward pass for the conv1d operation.

source

fn conv2d_backward( x: FloatTensor<B, 4>, weight: FloatTensor<B, 4>, bias: Option<FloatTensor<B, 1>>, output_grad: FloatTensor<B, 4>, options: ConvOptions<2>, ) -> Conv2dBackward<B>

Backward pass for the conv2d operation.

source

fn conv3d_backward( x: FloatTensor<B, 5>, weight: FloatTensor<B, 5>, bias: Option<FloatTensor<B, 1>>, output_grad: FloatTensor<B, 5>, options: ConvOptions<3>, ) -> Conv3dBackward<B>

Backward pass for the conv3d operation.

source

fn conv_transpose1d( x: FloatTensor<B, 3>, weight: FloatTensor<B, 3>, bias: Option<FloatTensor<B, 1>>, options: ConvTransposeOptions<1>, ) -> FloatTensor<B, 3>

One dimensional transposed convolution.

§Shapes

x: [batch_size, channels_in, length], weight: [channels_in, channels_out, kernel_size], bias: [channels_out],

source

fn conv_transpose1d_backward( x: FloatTensor<B, 3>, weight: FloatTensor<B, 3>, bias: Option<FloatTensor<B, 1>>, output_grad: FloatTensor<B, 3>, options: ConvTransposeOptions<1>, ) -> Conv1dBackward<B>

Backward pass for the conv transpose 1d operation.

source

fn conv_transpose2d_backward( x: FloatTensor<B, 4>, weight: FloatTensor<B, 4>, bias: Option<FloatTensor<B, 1>>, output_grad: FloatTensor<B, 4>, options: ConvTransposeOptions<2>, ) -> Conv2dBackward<B>

Backward pass for the conv transpose 2d operation.

source

fn conv_transpose3d_backward( x: FloatTensor<B, 5>, weight: FloatTensor<B, 5>, bias: Option<FloatTensor<B, 1>>, output_grad: FloatTensor<B, 5>, options: ConvTransposeOptions<3>, ) -> Conv3dBackward<B>

Backward pass for the conv transpose 3d operation.

source

fn unfold4d( x: FloatTensor<B, 4>, kernel_size: [usize; 2], options: UnfoldOptions, ) -> FloatTensor<B, 3>

Four-dimensional unfolding.

§Shapes

x: [batch_size, channels_in, height, width], returns: [batch_size, channels_in * kernel_size_1 * kernel_size_2, number of blocks],

source

fn avg_pool1d( x: FloatTensor<B, 3>, kernel_size: usize, stride: usize, padding: usize, count_include_pad: bool, ) -> FloatTensor<B, 3>

One dimensional avg pooling.

§Shapes

x: [batch_size, channels, length],

source

fn avg_pool1d_backward( x: FloatTensor<B, 3>, grad: FloatTensor<B, 3>, kernel_size: usize, stride: usize, padding: usize, count_include_pad: bool, ) -> FloatTensor<B, 3>

Backward pass for the avg pooling 1d operation.

source

fn adaptive_avg_pool1d( x: FloatTensor<B, 3>, output_size: usize, ) -> FloatTensor<B, 3>

One dimensional adaptive avg pooling.

§Shapes

x: [batch_size, channels, length],

source

fn adaptive_avg_pool1d_backward( x: FloatTensor<B, 3>, grad: FloatTensor<B, 3>, ) -> FloatTensor<B, 3>

Backward pass for the adaptive avg pooling 1d operation.

source

fn max_pool1d( x: FloatTensor<B, 3>, kernel_size: usize, stride: usize, padding: usize, dilation: usize, ) -> FloatTensor<B, 3>

One dimensional max pooling.

§Shapes

x: [batch_size, channels, length],

source

fn max_pool1d_with_indices( x: FloatTensor<B, 3>, kernel_size: usize, stride: usize, padding: usize, dilation: usize, ) -> MaxPool1dWithIndices<B>

One dimensional max pooling with indices.

§Shapes

x: [batch_size, channels, length],

source

fn max_pool1d_with_indices_backward( x: FloatTensor<B, 3>, kernel_size: usize, stride: usize, padding: usize, dilation: usize, output_grad: FloatTensor<B, 3>, indices: IntTensor<B, 3>, ) -> MaxPool1dBackward<B>

Backward pass for the max pooling 1d operation.

Object Safety§

This trait is not object safe.

Implementors§