ModuleOps

Trait ModuleOps 

Source
pub trait ModuleOps<B: Backend> {
Show 45 methods // Required methods fn conv2d( x: FloatTensor<B>, weight: FloatTensor<B>, bias: Option<FloatTensor<B>>, options: ConvOptions<2>, ) -> FloatTensor<B>; fn deform_conv2d( x: FloatTensor<B>, offset: FloatTensor<B>, weight: FloatTensor<B>, mask: Option<FloatTensor<B>>, bias: Option<FloatTensor<B>>, options: DeformConvOptions<2>, ) -> FloatTensor<B>; fn deform_conv2d_backward( x: FloatTensor<B>, offset: FloatTensor<B>, weight: FloatTensor<B>, mask: Option<FloatTensor<B>>, bias: Option<FloatTensor<B>>, output_grad: FloatTensor<B>, options: DeformConvOptions<2>, ) -> DeformConv2dBackward<B>; fn conv3d( x: FloatTensor<B>, weight: FloatTensor<B>, bias: Option<FloatTensor<B>>, options: ConvOptions<3>, ) -> FloatTensor<B>; fn conv_transpose2d( x: FloatTensor<B>, weight: FloatTensor<B>, bias: Option<FloatTensor<B>>, options: ConvTransposeOptions<2>, ) -> FloatTensor<B>; fn conv_transpose3d( x: FloatTensor<B>, weight: FloatTensor<B>, bias: Option<FloatTensor<B>>, options: ConvTransposeOptions<3>, ) -> FloatTensor<B>; fn avg_pool2d( x: FloatTensor<B>, kernel_size: [usize; 2], stride: [usize; 2], padding: [usize; 2], count_include_pad: bool, ) -> FloatTensor<B>; fn avg_pool2d_backward( x: FloatTensor<B>, grad: FloatTensor<B>, kernel_size: [usize; 2], stride: [usize; 2], padding: [usize; 2], count_include_pad: bool, ) -> FloatTensor<B>; fn adaptive_avg_pool2d( x: FloatTensor<B>, output_size: [usize; 2], ) -> FloatTensor<B>; fn adaptive_avg_pool2d_backward( x: FloatTensor<B>, grad: FloatTensor<B>, ) -> FloatTensor<B>; fn max_pool2d( x: FloatTensor<B>, kernel_size: [usize; 2], stride: [usize; 2], padding: [usize; 2], dilation: [usize; 2], ) -> FloatTensor<B>; fn max_pool2d_with_indices( x: FloatTensor<B>, kernel_size: [usize; 2], stride: [usize; 2], padding: [usize; 2], dilation: [usize; 2], ) -> MaxPool2dWithIndices<B>; fn max_pool2d_with_indices_backward( x: FloatTensor<B>, kernel_size: [usize; 2], stride: [usize; 2], padding: [usize; 2], dilation: [usize; 2], 
output_grad: FloatTensor<B>, indices: IntTensor<B>, ) -> MaxPool2dBackward<B>; fn interpolate( x: FloatTensor<B>, output_size: [usize; 2], options: InterpolateOptions, ) -> FloatTensor<B>; fn interpolate_backward( x: FloatTensor<B>, grad: FloatTensor<B>, output_size: [usize; 2], options: InterpolateOptions, ) -> FloatTensor<B>; // Provided methods fn embedding( weights: FloatTensor<B>, indices: IntTensor<B>, ) -> FloatTensor<B> { ... } fn embedding_backward( weights: FloatTensor<B>, output_grad: FloatTensor<B>, indices: IntTensor<B>, ) -> FloatTensor<B> { ... } fn conv1d( x: FloatTensor<B>, weight: FloatTensor<B>, bias: Option<FloatTensor<B>>, options: ConvOptions<1>, ) -> FloatTensor<B> { ... } fn conv1d_x_backward( x: FloatTensor<B>, weight: FloatTensor<B>, output_grad: FloatTensor<B>, options: ConvOptions<1>, ) -> FloatTensor<B> { ... } fn conv1d_weight_backward( x: FloatTensor<B>, weight: FloatTensor<B>, output_grad: FloatTensor<B>, options: ConvOptions<1>, ) -> FloatTensor<B> { ... } fn conv1d_bias_backward( x: FloatTensor<B>, bias: FloatTensor<B>, output_grad: FloatTensor<B>, ) -> FloatTensor<B> { ... } fn conv2d_x_backward( x: FloatTensor<B>, weight: FloatTensor<B>, output_grad: FloatTensor<B>, options: ConvOptions<2>, ) -> FloatTensor<B> { ... } fn conv2d_weight_backward( x: FloatTensor<B>, weight: FloatTensor<B>, output_grad: FloatTensor<B>, options: ConvOptions<2>, ) -> FloatTensor<B> { ... } fn conv2d_bias_backward( x: FloatTensor<B>, weight: FloatTensor<B>, bias: FloatTensor<B>, output_grad: FloatTensor<B>, ) -> FloatTensor<B> { ... } fn conv3d_x_backward( x: FloatTensor<B>, weight: FloatTensor<B>, output_grad: FloatTensor<B>, options: ConvOptions<3>, ) -> FloatTensor<B> { ... } fn conv3d_weight_backward( x: FloatTensor<B>, weight: FloatTensor<B>, output_grad: FloatTensor<B>, options: ConvOptions<3>, ) -> FloatTensor<B> { ... 
} fn conv3d_bias_backward( x: FloatTensor<B>, weight: FloatTensor<B>, bias: FloatTensor<B>, output_grad: FloatTensor<B>, ) -> FloatTensor<B> { ... } fn conv_transpose1d( x: FloatTensor<B>, weight: FloatTensor<B>, bias: Option<FloatTensor<B>>, options: ConvTransposeOptions<1>, ) -> FloatTensor<B> { ... } fn conv_transpose1d_x_backward( weight: FloatTensor<B>, output_grad: FloatTensor<B>, options: ConvTransposeOptions<1>, ) -> FloatTensor<B> { ... } fn conv_transpose1d_weight_backward( x: FloatTensor<B>, weight: FloatTensor<B>, output_grad: FloatTensor<B>, options: ConvTransposeOptions<1>, ) -> FloatTensor<B> { ... } fn conv_transpose1d_bias_backward( x: FloatTensor<B>, bias: FloatTensor<B>, output_grad: FloatTensor<B>, ) -> FloatTensor<B> { ... } fn conv_transpose2d_x_backward( weight: FloatTensor<B>, output_grad: FloatTensor<B>, options: ConvTransposeOptions<2>, ) -> FloatTensor<B> { ... } fn conv_transpose2d_weight_backward( x: FloatTensor<B>, weight: FloatTensor<B>, output_grad: FloatTensor<B>, options: ConvTransposeOptions<2>, ) -> FloatTensor<B> { ... } fn conv_transpose2d_bias_backward( x: FloatTensor<B>, bias: FloatTensor<B>, output_grad: FloatTensor<B>, ) -> FloatTensor<B> { ... } fn conv_transpose3d_x_backward( weight: FloatTensor<B>, output_grad: FloatTensor<B>, options: ConvTransposeOptions<3>, ) -> FloatTensor<B> { ... } fn conv_transpose3d_weight_backward( x: FloatTensor<B>, weight: FloatTensor<B>, output_grad: FloatTensor<B>, options: ConvTransposeOptions<3>, ) -> FloatTensor<B> { ... } fn conv_transpose3d_bias_backward( x: FloatTensor<B>, bias: FloatTensor<B>, output_grad: FloatTensor<B>, ) -> FloatTensor<B> { ... } fn unfold4d( x: FloatTensor<B>, kernel_size: [usize; 2], options: UnfoldOptions, ) -> FloatTensor<B> { ... } fn avg_pool1d( x: FloatTensor<B>, kernel_size: usize, stride: usize, padding: usize, count_include_pad: bool, ) -> FloatTensor<B> { ... 
} fn avg_pool1d_backward( x: FloatTensor<B>, grad: FloatTensor<B>, kernel_size: usize, stride: usize, padding: usize, count_include_pad: bool, ) -> FloatTensor<B> { ... } fn adaptive_avg_pool1d( x: FloatTensor<B>, output_size: usize, ) -> FloatTensor<B> { ... } fn adaptive_avg_pool1d_backward( x: FloatTensor<B>, grad: FloatTensor<B>, ) -> FloatTensor<B> { ... } fn max_pool1d( x: FloatTensor<B>, kernel_size: usize, stride: usize, padding: usize, dilation: usize, ) -> FloatTensor<B> { ... } fn max_pool1d_with_indices( x: FloatTensor<B>, kernel_size: usize, stride: usize, padding: usize, dilation: usize, ) -> MaxPool1dWithIndices<B> { ... } fn max_pool1d_with_indices_backward( x: FloatTensor<B>, kernel_size: usize, stride: usize, padding: usize, dilation: usize, output_grad: FloatTensor<B>, indices: IntTensor<B>, ) -> MaxPool1dBackward<B> { ... }
}
Expand description

Module operations trait.

Required Methods§

Source

fn conv2d( x: FloatTensor<B>, weight: FloatTensor<B>, bias: Option<FloatTensor<B>>, options: ConvOptions<2>, ) -> FloatTensor<B>

Two dimensional convolution.

§Shapes

x: [batch_size, channels_in, height, width], weight: [channels_out, channels_in, kernel_size_1, kernel_size_2], bias: [channels_out],

Source

fn deform_conv2d( x: FloatTensor<B>, offset: FloatTensor<B>, weight: FloatTensor<B>, mask: Option<FloatTensor<B>>, bias: Option<FloatTensor<B>>, options: DeformConvOptions<2>, ) -> FloatTensor<B>

Two dimensional deformable convolution.

§Shapes

x: [batch_size, channels_in, height, width], weight: [channels_out, channels_in, kernel_size_1, kernel_size_2], bias: [channels_out],

Source

fn deform_conv2d_backward( x: FloatTensor<B>, offset: FloatTensor<B>, weight: FloatTensor<B>, mask: Option<FloatTensor<B>>, bias: Option<FloatTensor<B>>, output_grad: FloatTensor<B>, options: DeformConvOptions<2>, ) -> DeformConv2dBackward<B>

Backward pass for the deform_conv2d operation.

Source

fn conv3d( x: FloatTensor<B>, weight: FloatTensor<B>, bias: Option<FloatTensor<B>>, options: ConvOptions<3>, ) -> FloatTensor<B>

Three dimensional convolution.

§Shapes

x: [batch_size, channels_in, depth, height, width], weight: [channels_out, channels_in, kernel_size_1, kernel_size_2, kernel_size_3], bias: [channels_out],

Source

fn conv_transpose2d( x: FloatTensor<B>, weight: FloatTensor<B>, bias: Option<FloatTensor<B>>, options: ConvTransposeOptions<2>, ) -> FloatTensor<B>

Two dimensional transposed convolution.

§Shapes

x: [batch_size, channels_in, height, width], weight: [channels_in, channels_out, kernel_size_1, kernel_size_2], bias: [channels_out],

Source

fn conv_transpose3d( x: FloatTensor<B>, weight: FloatTensor<B>, bias: Option<FloatTensor<B>>, options: ConvTransposeOptions<3>, ) -> FloatTensor<B>

Three dimensional transposed convolution.

§Shapes

x: [batch_size, channels_in, depth, height, width], weight: [channels_in, channels_out, kernel_size_1, kernel_size_2, kernel_size_3], bias: [channels_out],

Source

fn avg_pool2d( x: FloatTensor<B>, kernel_size: [usize; 2], stride: [usize; 2], padding: [usize; 2], count_include_pad: bool, ) -> FloatTensor<B>

Two dimensional avg pooling.

§Shapes

x: [batch_size, channels, height, width],

Source

fn avg_pool2d_backward( x: FloatTensor<B>, grad: FloatTensor<B>, kernel_size: [usize; 2], stride: [usize; 2], padding: [usize; 2], count_include_pad: bool, ) -> FloatTensor<B>

Backward pass for the avg pooling 2d operation.

Source

fn adaptive_avg_pool2d( x: FloatTensor<B>, output_size: [usize; 2], ) -> FloatTensor<B>

Two dimensional adaptive avg pooling.

§Shapes

x: [batch_size, channels, height, width],

Source

fn adaptive_avg_pool2d_backward( x: FloatTensor<B>, grad: FloatTensor<B>, ) -> FloatTensor<B>

Backward pass for the adaptive avg pooling 2d operation.

Source

fn max_pool2d( x: FloatTensor<B>, kernel_size: [usize; 2], stride: [usize; 2], padding: [usize; 2], dilation: [usize; 2], ) -> FloatTensor<B>

Two dimensional max pooling.

§Shapes

x: [batch_size, channels, height, width],

Source

fn max_pool2d_with_indices( x: FloatTensor<B>, kernel_size: [usize; 2], stride: [usize; 2], padding: [usize; 2], dilation: [usize; 2], ) -> MaxPool2dWithIndices<B>

Two dimensional max pooling with indices.

§Shapes

x: [batch_size, channels, height, width],

Source

fn max_pool2d_with_indices_backward( x: FloatTensor<B>, kernel_size: [usize; 2], stride: [usize; 2], padding: [usize; 2], dilation: [usize; 2], output_grad: FloatTensor<B>, indices: IntTensor<B>, ) -> MaxPool2dBackward<B>

Backward pass for the max pooling 2d operation.

Source

fn interpolate( x: FloatTensor<B>, output_size: [usize; 2], options: InterpolateOptions, ) -> FloatTensor<B>

Down/up samples the input.

§Shapes

x: [batch_size, channels, height, width],

Source

fn interpolate_backward( x: FloatTensor<B>, grad: FloatTensor<B>, output_size: [usize; 2], options: InterpolateOptions, ) -> FloatTensor<B>

Backward pass for the interpolate operation.

Provided Methods§

Source

fn embedding(weights: FloatTensor<B>, indices: IntTensor<B>) -> FloatTensor<B>

Embedding operation.

§Arguments
  • weights - The embedding weights.
  • indices - The indices tensor.
§Returns

The output tensor.

Source

fn embedding_backward( weights: FloatTensor<B>, output_grad: FloatTensor<B>, indices: IntTensor<B>, ) -> FloatTensor<B>

Embedding backward operation.

§Arguments
  • weights - The embedding weights.
  • output_grad - The output gradient.
  • indices - The indices tensor.
§Returns

The gradient.

Source

fn conv1d( x: FloatTensor<B>, weight: FloatTensor<B>, bias: Option<FloatTensor<B>>, options: ConvOptions<1>, ) -> FloatTensor<B>

One dimensional convolution.

§Shapes

x: [batch_size, channels_in, length], weight: [channels_out, channels_in, kernel_size], bias: [channels_out],

Source

fn conv1d_x_backward( x: FloatTensor<B>, weight: FloatTensor<B>, output_grad: FloatTensor<B>, options: ConvOptions<1>, ) -> FloatTensor<B>

Backward pass for the conv1d operation, returning the gradient for x.

Source

fn conv1d_weight_backward( x: FloatTensor<B>, weight: FloatTensor<B>, output_grad: FloatTensor<B>, options: ConvOptions<1>, ) -> FloatTensor<B>

Backward pass for the conv1d operation, returning the gradient for weight.

Source

fn conv1d_bias_backward( x: FloatTensor<B>, bias: FloatTensor<B>, output_grad: FloatTensor<B>, ) -> FloatTensor<B>

Backward pass for the conv1d operation, returning the gradient for bias.

Source

fn conv2d_x_backward( x: FloatTensor<B>, weight: FloatTensor<B>, output_grad: FloatTensor<B>, options: ConvOptions<2>, ) -> FloatTensor<B>

Backward pass for the conv2d operation, returning the gradient for x.

Source

fn conv2d_weight_backward( x: FloatTensor<B>, weight: FloatTensor<B>, output_grad: FloatTensor<B>, options: ConvOptions<2>, ) -> FloatTensor<B>

Backward pass for the conv2d operation, returning the gradient for weight.

Source

fn conv2d_bias_backward( x: FloatTensor<B>, weight: FloatTensor<B>, bias: FloatTensor<B>, output_grad: FloatTensor<B>, ) -> FloatTensor<B>

Backward pass for the conv2d operation, returning the gradient for bias.

Source

fn conv3d_x_backward( x: FloatTensor<B>, weight: FloatTensor<B>, output_grad: FloatTensor<B>, options: ConvOptions<3>, ) -> FloatTensor<B>

Backward pass for the conv3d operation, returning the gradient for x.

Source

fn conv3d_weight_backward( x: FloatTensor<B>, weight: FloatTensor<B>, output_grad: FloatTensor<B>, options: ConvOptions<3>, ) -> FloatTensor<B>

Backward pass for the conv3d operation, returning the gradient for weight.

Source

fn conv3d_bias_backward( x: FloatTensor<B>, weight: FloatTensor<B>, bias: FloatTensor<B>, output_grad: FloatTensor<B>, ) -> FloatTensor<B>

Backward pass for the conv3d operation, returning the gradient for bias.

Source

fn conv_transpose1d( x: FloatTensor<B>, weight: FloatTensor<B>, bias: Option<FloatTensor<B>>, options: ConvTransposeOptions<1>, ) -> FloatTensor<B>

One dimensional transposed convolution.

§Shapes

x: [batch_size, channels_in, length], weight: [channels_in, channels_out, kernel_size], bias: [channels_out],

Source

fn conv_transpose1d_x_backward( weight: FloatTensor<B>, output_grad: FloatTensor<B>, options: ConvTransposeOptions<1>, ) -> FloatTensor<B>

Backward pass for the conv transpose 1d operation, returning the gradient for x.

Source

fn conv_transpose1d_weight_backward( x: FloatTensor<B>, weight: FloatTensor<B>, output_grad: FloatTensor<B>, options: ConvTransposeOptions<1>, ) -> FloatTensor<B>

Backward pass for the conv transpose 1d operation, returning the gradient for weight.

Source

fn conv_transpose1d_bias_backward( x: FloatTensor<B>, bias: FloatTensor<B>, output_grad: FloatTensor<B>, ) -> FloatTensor<B>

Backward pass for the conv transpose 1d operation, returning the gradient for bias.

Source

fn conv_transpose2d_x_backward( weight: FloatTensor<B>, output_grad: FloatTensor<B>, options: ConvTransposeOptions<2>, ) -> FloatTensor<B>

Backward pass for the conv transpose 2d operation, returning the gradient for x.

Source

fn conv_transpose2d_weight_backward( x: FloatTensor<B>, weight: FloatTensor<B>, output_grad: FloatTensor<B>, options: ConvTransposeOptions<2>, ) -> FloatTensor<B>

Backward pass for the conv transpose 2d operation, returning the gradient for weight.

Source

fn conv_transpose2d_bias_backward( x: FloatTensor<B>, bias: FloatTensor<B>, output_grad: FloatTensor<B>, ) -> FloatTensor<B>

Backward pass for the conv transpose 2d operation, returning the gradient for bias.

Source

fn conv_transpose3d_x_backward( weight: FloatTensor<B>, output_grad: FloatTensor<B>, options: ConvTransposeOptions<3>, ) -> FloatTensor<B>

Backward pass for the conv transpose 3d operation, returning the gradient for x.

Source

fn conv_transpose3d_weight_backward( x: FloatTensor<B>, weight: FloatTensor<B>, output_grad: FloatTensor<B>, options: ConvTransposeOptions<3>, ) -> FloatTensor<B>

Backward pass for the conv transpose 3d operation, returning the gradient for weight.

Source

fn conv_transpose3d_bias_backward( x: FloatTensor<B>, bias: FloatTensor<B>, output_grad: FloatTensor<B>, ) -> FloatTensor<B>

Backward pass for the conv transpose 3d operation, returning the gradient for bias.

Source

fn unfold4d( x: FloatTensor<B>, kernel_size: [usize; 2], options: UnfoldOptions, ) -> FloatTensor<B>

Four-dimensional unfolding.

§Shapes
  • x: [batch_size, channels_in, height, width],
  • returns: [batch_size, channels_in * kernel_size_1 * kernel_size_2, number of blocks],
Source

fn avg_pool1d( x: FloatTensor<B>, kernel_size: usize, stride: usize, padding: usize, count_include_pad: bool, ) -> FloatTensor<B>

One dimensional avg pooling.

§Shapes

x: [batch_size, channels, length],

Source

fn avg_pool1d_backward( x: FloatTensor<B>, grad: FloatTensor<B>, kernel_size: usize, stride: usize, padding: usize, count_include_pad: bool, ) -> FloatTensor<B>

Backward pass for the avg pooling 1d operation.

Source

fn adaptive_avg_pool1d(x: FloatTensor<B>, output_size: usize) -> FloatTensor<B>

One dimensional adaptive avg pooling.

§Shapes

x: [batch_size, channels, length],

Source

fn adaptive_avg_pool1d_backward( x: FloatTensor<B>, grad: FloatTensor<B>, ) -> FloatTensor<B>

Backward pass for the adaptive avg pooling 1d operation.

Source

fn max_pool1d( x: FloatTensor<B>, kernel_size: usize, stride: usize, padding: usize, dilation: usize, ) -> FloatTensor<B>

One dimensional max pooling.

§Shapes

x: [batch_size, channels, length],

Source

fn max_pool1d_with_indices( x: FloatTensor<B>, kernel_size: usize, stride: usize, padding: usize, dilation: usize, ) -> MaxPool1dWithIndices<B>

One dimensional max pooling with indices.

§Shapes

x: [batch_size, channels, length],

Source

fn max_pool1d_with_indices_backward( x: FloatTensor<B>, kernel_size: usize, stride: usize, padding: usize, dilation: usize, output_grad: FloatTensor<B>, indices: IntTensor<B>, ) -> MaxPool1dBackward<B>

Backward pass for the max pooling 1d operation.

Dyn Compatibility§

This trait is not dyn compatible.

In older versions of Rust, dyn compatibility was called "object safety", so this trait is not object safe.

Implementors§