Skip to main content

ModuleOps

Trait ModuleOps 

Source
pub trait ModuleOps<B>
where B: Backend,
{
Show 56 methods // Required methods fn conv2d( x: <B as BackendTypes>::FloatTensorPrimitive, weight: <B as BackendTypes>::FloatTensorPrimitive, bias: Option<<B as BackendTypes>::FloatTensorPrimitive>, options: ConvOptions<2>, ) -> <B as BackendTypes>::FloatTensorPrimitive; fn deform_conv2d( x: <B as BackendTypes>::FloatTensorPrimitive, offset: <B as BackendTypes>::FloatTensorPrimitive, weight: <B as BackendTypes>::FloatTensorPrimitive, mask: Option<<B as BackendTypes>::FloatTensorPrimitive>, bias: Option<<B as BackendTypes>::FloatTensorPrimitive>, options: DeformConvOptions<2>, ) -> <B as BackendTypes>::FloatTensorPrimitive; fn deform_conv2d_backward( x: <B as BackendTypes>::FloatTensorPrimitive, offset: <B as BackendTypes>::FloatTensorPrimitive, weight: <B as BackendTypes>::FloatTensorPrimitive, mask: Option<<B as BackendTypes>::FloatTensorPrimitive>, bias: Option<<B as BackendTypes>::FloatTensorPrimitive>, output_grad: <B as BackendTypes>::FloatTensorPrimitive, options: DeformConvOptions<2>, ) -> DeformConv2dBackward<B>; fn conv3d( x: <B as BackendTypes>::FloatTensorPrimitive, weight: <B as BackendTypes>::FloatTensorPrimitive, bias: Option<<B as BackendTypes>::FloatTensorPrimitive>, options: ConvOptions<3>, ) -> <B as BackendTypes>::FloatTensorPrimitive; fn conv_transpose2d( x: <B as BackendTypes>::FloatTensorPrimitive, weight: <B as BackendTypes>::FloatTensorPrimitive, bias: Option<<B as BackendTypes>::FloatTensorPrimitive>, options: ConvTransposeOptions<2>, ) -> <B as BackendTypes>::FloatTensorPrimitive; fn conv_transpose3d( x: <B as BackendTypes>::FloatTensorPrimitive, weight: <B as BackendTypes>::FloatTensorPrimitive, bias: Option<<B as BackendTypes>::FloatTensorPrimitive>, options: ConvTransposeOptions<3>, ) -> <B as BackendTypes>::FloatTensorPrimitive; fn avg_pool2d( x: <B as BackendTypes>::FloatTensorPrimitive, kernel_size: [usize; 2], stride: [usize; 2], padding: [usize; 2], count_include_pad: bool, ceil_mode: bool, ) -> <B as 
BackendTypes>::FloatTensorPrimitive; fn avg_pool2d_backward( x: <B as BackendTypes>::FloatTensorPrimitive, grad: <B as BackendTypes>::FloatTensorPrimitive, kernel_size: [usize; 2], stride: [usize; 2], padding: [usize; 2], count_include_pad: bool, ceil_mode: bool, ) -> <B as BackendTypes>::FloatTensorPrimitive; fn adaptive_avg_pool2d( x: <B as BackendTypes>::FloatTensorPrimitive, output_size: [usize; 2], ) -> <B as BackendTypes>::FloatTensorPrimitive; fn adaptive_avg_pool2d_backward( x: <B as BackendTypes>::FloatTensorPrimitive, grad: <B as BackendTypes>::FloatTensorPrimitive, ) -> <B as BackendTypes>::FloatTensorPrimitive; fn max_pool2d( x: <B as BackendTypes>::FloatTensorPrimitive, kernel_size: [usize; 2], stride: [usize; 2], padding: [usize; 2], dilation: [usize; 2], ceil_mode: bool, ) -> <B as BackendTypes>::FloatTensorPrimitive; fn max_pool2d_with_indices( x: <B as BackendTypes>::FloatTensorPrimitive, kernel_size: [usize; 2], stride: [usize; 2], padding: [usize; 2], dilation: [usize; 2], ceil_mode: bool, ) -> MaxPool2dWithIndices<B>; fn max_pool2d_with_indices_backward( x: <B as BackendTypes>::FloatTensorPrimitive, kernel_size: [usize; 2], stride: [usize; 2], padding: [usize; 2], dilation: [usize; 2], ceil_mode: bool, output_grad: <B as BackendTypes>::FloatTensorPrimitive, indices: <B as BackendTypes>::IntTensorPrimitive, ) -> MaxPool2dBackward<B>; fn interpolate( x: <B as BackendTypes>::FloatTensorPrimitive, output_size: [usize; 2], options: InterpolateOptions, ) -> <B as BackendTypes>::FloatTensorPrimitive; fn interpolate_backward( x: <B as BackendTypes>::FloatTensorPrimitive, grad: <B as BackendTypes>::FloatTensorPrimitive, output_size: [usize; 2], options: InterpolateOptions, ) -> <B as BackendTypes>::FloatTensorPrimitive; fn attention( query: <B as BackendTypes>::FloatTensorPrimitive, key: <B as BackendTypes>::FloatTensorPrimitive, value: <B as BackendTypes>::FloatTensorPrimitive, mask: Option<<B as BackendTypes>::BoolTensorPrimitive>, attn_bias: Option<<B 
as BackendTypes>::FloatTensorPrimitive>, options: AttentionModuleOptions, ) -> <B as BackendTypes>::FloatTensorPrimitive; fn rfft( signal: <B as BackendTypes>::FloatTensorPrimitive, dim: usize, n: Option<usize>, ) -> (<B as BackendTypes>::FloatTensorPrimitive, <B as BackendTypes>::FloatTensorPrimitive); fn irfft( spectrum_re: <B as BackendTypes>::FloatTensorPrimitive, spectrum_im: <B as BackendTypes>::FloatTensorPrimitive, dim: usize, n: Option<usize>, ) -> <B as BackendTypes>::FloatTensorPrimitive; // Provided methods fn embedding( weights: <B as BackendTypes>::FloatTensorPrimitive, indices: <B as BackendTypes>::IntTensorPrimitive, ) -> <B as BackendTypes>::FloatTensorPrimitive { ... } fn embedding_backward( weights: <B as BackendTypes>::FloatTensorPrimitive, output_grad: <B as BackendTypes>::FloatTensorPrimitive, indices: <B as BackendTypes>::IntTensorPrimitive, ) -> <B as BackendTypes>::FloatTensorPrimitive { ... } fn linear( x: <B as BackendTypes>::FloatTensorPrimitive, weight: <B as BackendTypes>::FloatTensorPrimitive, bias: Option<<B as BackendTypes>::FloatTensorPrimitive>, ) -> <B as BackendTypes>::FloatTensorPrimitive { ... } fn linear_x_backward( weight: <B as BackendTypes>::FloatTensorPrimitive, output_grad: <B as BackendTypes>::FloatTensorPrimitive, ) -> <B as BackendTypes>::FloatTensorPrimitive { ... } fn linear_weight_backward( x: <B as BackendTypes>::FloatTensorPrimitive, output_grad: <B as BackendTypes>::FloatTensorPrimitive, ) -> <B as BackendTypes>::FloatTensorPrimitive { ... } fn linear_bias_backward( output_grad: <B as BackendTypes>::FloatTensorPrimitive, ) -> <B as BackendTypes>::FloatTensorPrimitive { ... } fn conv1d( x: <B as BackendTypes>::FloatTensorPrimitive, weight: <B as BackendTypes>::FloatTensorPrimitive, bias: Option<<B as BackendTypes>::FloatTensorPrimitive>, options: ConvOptions<1>, ) -> <B as BackendTypes>::FloatTensorPrimitive { ... 
} fn conv1d_x_backward( x: <B as BackendTypes>::FloatTensorPrimitive, weight: <B as BackendTypes>::FloatTensorPrimitive, output_grad: <B as BackendTypes>::FloatTensorPrimitive, options: ConvOptions<1>, ) -> <B as BackendTypes>::FloatTensorPrimitive { ... } fn conv1d_weight_backward( x: <B as BackendTypes>::FloatTensorPrimitive, weight: <B as BackendTypes>::FloatTensorPrimitive, output_grad: <B as BackendTypes>::FloatTensorPrimitive, options: ConvOptions<1>, ) -> <B as BackendTypes>::FloatTensorPrimitive { ... } fn conv1d_bias_backward( x: <B as BackendTypes>::FloatTensorPrimitive, bias: <B as BackendTypes>::FloatTensorPrimitive, output_grad: <B as BackendTypes>::FloatTensorPrimitive, ) -> <B as BackendTypes>::FloatTensorPrimitive { ... } fn conv2d_x_backward( x: <B as BackendTypes>::FloatTensorPrimitive, weight: <B as BackendTypes>::FloatTensorPrimitive, output_grad: <B as BackendTypes>::FloatTensorPrimitive, options: ConvOptions<2>, ) -> <B as BackendTypes>::FloatTensorPrimitive { ... } fn conv2d_weight_backward( x: <B as BackendTypes>::FloatTensorPrimitive, weight: <B as BackendTypes>::FloatTensorPrimitive, output_grad: <B as BackendTypes>::FloatTensorPrimitive, options: ConvOptions<2>, ) -> <B as BackendTypes>::FloatTensorPrimitive { ... } fn conv2d_bias_backward( x: <B as BackendTypes>::FloatTensorPrimitive, bias: <B as BackendTypes>::FloatTensorPrimitive, output_grad: <B as BackendTypes>::FloatTensorPrimitive, ) -> <B as BackendTypes>::FloatTensorPrimitive { ... } fn conv3d_x_backward( x: <B as BackendTypes>::FloatTensorPrimitive, weight: <B as BackendTypes>::FloatTensorPrimitive, output_grad: <B as BackendTypes>::FloatTensorPrimitive, options: ConvOptions<3>, ) -> <B as BackendTypes>::FloatTensorPrimitive { ... 
} fn conv3d_weight_backward( x: <B as BackendTypes>::FloatTensorPrimitive, weight: <B as BackendTypes>::FloatTensorPrimitive, output_grad: <B as BackendTypes>::FloatTensorPrimitive, options: ConvOptions<3>, ) -> <B as BackendTypes>::FloatTensorPrimitive { ... } fn conv3d_bias_backward( x: <B as BackendTypes>::FloatTensorPrimitive, bias: <B as BackendTypes>::FloatTensorPrimitive, output_grad: <B as BackendTypes>::FloatTensorPrimitive, ) -> <B as BackendTypes>::FloatTensorPrimitive { ... } fn conv_transpose1d( x: <B as BackendTypes>::FloatTensorPrimitive, weight: <B as BackendTypes>::FloatTensorPrimitive, bias: Option<<B as BackendTypes>::FloatTensorPrimitive>, options: ConvTransposeOptions<1>, ) -> <B as BackendTypes>::FloatTensorPrimitive { ... } fn conv_transpose1d_x_backward( weight: <B as BackendTypes>::FloatTensorPrimitive, output_grad: <B as BackendTypes>::FloatTensorPrimitive, options: ConvTransposeOptions<1>, ) -> <B as BackendTypes>::FloatTensorPrimitive { ... } fn conv_transpose1d_weight_backward( x: <B as BackendTypes>::FloatTensorPrimitive, weight: <B as BackendTypes>::FloatTensorPrimitive, output_grad: <B as BackendTypes>::FloatTensorPrimitive, options: ConvTransposeOptions<1>, ) -> <B as BackendTypes>::FloatTensorPrimitive { ... } fn conv_transpose1d_bias_backward( x: <B as BackendTypes>::FloatTensorPrimitive, bias: <B as BackendTypes>::FloatTensorPrimitive, output_grad: <B as BackendTypes>::FloatTensorPrimitive, ) -> <B as BackendTypes>::FloatTensorPrimitive { ... } fn conv_transpose2d_x_backward( weight: <B as BackendTypes>::FloatTensorPrimitive, output_grad: <B as BackendTypes>::FloatTensorPrimitive, options: ConvTransposeOptions<2>, ) -> <B as BackendTypes>::FloatTensorPrimitive { ... 
} fn conv_transpose2d_weight_backward( x: <B as BackendTypes>::FloatTensorPrimitive, weight: <B as BackendTypes>::FloatTensorPrimitive, output_grad: <B as BackendTypes>::FloatTensorPrimitive, options: ConvTransposeOptions<2>, ) -> <B as BackendTypes>::FloatTensorPrimitive { ... } fn conv_transpose2d_bias_backward( x: <B as BackendTypes>::FloatTensorPrimitive, bias: <B as BackendTypes>::FloatTensorPrimitive, output_grad: <B as BackendTypes>::FloatTensorPrimitive, ) -> <B as BackendTypes>::FloatTensorPrimitive { ... } fn conv_transpose3d_x_backward( weight: <B as BackendTypes>::FloatTensorPrimitive, output_grad: <B as BackendTypes>::FloatTensorPrimitive, options: ConvTransposeOptions<3>, ) -> <B as BackendTypes>::FloatTensorPrimitive { ... } fn conv_transpose3d_weight_backward( x: <B as BackendTypes>::FloatTensorPrimitive, weight: <B as BackendTypes>::FloatTensorPrimitive, output_grad: <B as BackendTypes>::FloatTensorPrimitive, options: ConvTransposeOptions<3>, ) -> <B as BackendTypes>::FloatTensorPrimitive { ... } fn conv_transpose3d_bias_backward( x: <B as BackendTypes>::FloatTensorPrimitive, bias: <B as BackendTypes>::FloatTensorPrimitive, output_grad: <B as BackendTypes>::FloatTensorPrimitive, ) -> <B as BackendTypes>::FloatTensorPrimitive { ... } fn unfold4d( x: <B as BackendTypes>::FloatTensorPrimitive, kernel_size: [usize; 2], options: UnfoldOptions, ) -> <B as BackendTypes>::FloatTensorPrimitive { ... } fn avg_pool1d( x: <B as BackendTypes>::FloatTensorPrimitive, kernel_size: usize, stride: usize, padding: usize, count_include_pad: bool, ceil_mode: bool, ) -> <B as BackendTypes>::FloatTensorPrimitive { ... } fn avg_pool1d_backward( x: <B as BackendTypes>::FloatTensorPrimitive, grad: <B as BackendTypes>::FloatTensorPrimitive, kernel_size: usize, stride: usize, padding: usize, count_include_pad: bool, ceil_mode: bool, ) -> <B as BackendTypes>::FloatTensorPrimitive { ... 
} fn adaptive_avg_pool1d( x: <B as BackendTypes>::FloatTensorPrimitive, output_size: usize, ) -> <B as BackendTypes>::FloatTensorPrimitive { ... } fn adaptive_avg_pool1d_backward( x: <B as BackendTypes>::FloatTensorPrimitive, grad: <B as BackendTypes>::FloatTensorPrimitive, ) -> <B as BackendTypes>::FloatTensorPrimitive { ... } fn max_pool1d( x: <B as BackendTypes>::FloatTensorPrimitive, kernel_size: usize, stride: usize, padding: usize, dilation: usize, ceil_mode: bool, ) -> <B as BackendTypes>::FloatTensorPrimitive { ... } fn max_pool1d_with_indices( x: <B as BackendTypes>::FloatTensorPrimitive, kernel_size: usize, stride: usize, padding: usize, dilation: usize, ceil_mode: bool, ) -> MaxPool1dWithIndices<B> { ... } fn max_pool1d_with_indices_backward( x: <B as BackendTypes>::FloatTensorPrimitive, kernel_size: usize, stride: usize, padding: usize, dilation: usize, ceil_mode: bool, output_grad: <B as BackendTypes>::FloatTensorPrimitive, indices: <B as BackendTypes>::IntTensorPrimitive, ) -> MaxPool1dBackward<B> { ... } fn layer_norm( tensor: <B as BackendTypes>::FloatTensorPrimitive, gamma: <B as BackendTypes>::FloatTensorPrimitive, beta: Option<<B as BackendTypes>::FloatTensorPrimitive>, epsilon: f64, ) -> <B as BackendTypes>::FloatTensorPrimitive { ... } fn ctc_loss( log_probs: <B as BackendTypes>::FloatTensorPrimitive, targets: <B as BackendTypes>::IntTensorPrimitive, input_lengths: <B as BackendTypes>::IntTensorPrimitive, target_lengths: <B as BackendTypes>::IntTensorPrimitive, blank: usize, ) -> <B as BackendTypes>::FloatTensorPrimitive { ... } fn has_ctc_loss_backward() -> bool { ... } fn ctc_loss_backward( _log_probs: <B as BackendTypes>::FloatTensorPrimitive, _targets: <B as BackendTypes>::IntTensorPrimitive, _input_lengths: <B as BackendTypes>::IntTensorPrimitive, _target_lengths: <B as BackendTypes>::IntTensorPrimitive, _grad_loss: <B as BackendTypes>::FloatTensorPrimitive, _blank: usize, ) -> <B as BackendTypes>::FloatTensorPrimitive { ... }
}
Expand description

Module operations trait.

Required Methods§

Source

fn conv2d( x: <B as BackendTypes>::FloatTensorPrimitive, weight: <B as BackendTypes>::FloatTensorPrimitive, bias: Option<<B as BackendTypes>::FloatTensorPrimitive>, options: ConvOptions<2>, ) -> <B as BackendTypes>::FloatTensorPrimitive

Two dimensional convolution.

§Shapes

x: [batch_size, channels_in, height, width], weight: [channels_out, channels_in, kernel_size_1, kernel_size_2], bias: [channels_out],

Source

fn deform_conv2d( x: <B as BackendTypes>::FloatTensorPrimitive, offset: <B as BackendTypes>::FloatTensorPrimitive, weight: <B as BackendTypes>::FloatTensorPrimitive, mask: Option<<B as BackendTypes>::FloatTensorPrimitive>, bias: Option<<B as BackendTypes>::FloatTensorPrimitive>, options: DeformConvOptions<2>, ) -> <B as BackendTypes>::FloatTensorPrimitive

Two dimensional deformable convolution.

§Shapes

x: [batch_size, channels_in, height, width], weight: [channels_out, channels_in, kernel_size_1, kernel_size_2], bias: [channels_out],

Source

fn deform_conv2d_backward( x: <B as BackendTypes>::FloatTensorPrimitive, offset: <B as BackendTypes>::FloatTensorPrimitive, weight: <B as BackendTypes>::FloatTensorPrimitive, mask: Option<<B as BackendTypes>::FloatTensorPrimitive>, bias: Option<<B as BackendTypes>::FloatTensorPrimitive>, output_grad: <B as BackendTypes>::FloatTensorPrimitive, options: DeformConvOptions<2>, ) -> DeformConv2dBackward<B>

Backward pass for the deform_conv2d operation.

Source

fn conv3d( x: <B as BackendTypes>::FloatTensorPrimitive, weight: <B as BackendTypes>::FloatTensorPrimitive, bias: Option<<B as BackendTypes>::FloatTensorPrimitive>, options: ConvOptions<3>, ) -> <B as BackendTypes>::FloatTensorPrimitive

Three dimensional convolution.

§Shapes

x: [batch_size, channels_in, depth, height, width], weight: [channels_out, channels_in, kernel_size_1, kernel_size_2, kernel_size_3], bias: [channels_out],

Source

fn conv_transpose2d( x: <B as BackendTypes>::FloatTensorPrimitive, weight: <B as BackendTypes>::FloatTensorPrimitive, bias: Option<<B as BackendTypes>::FloatTensorPrimitive>, options: ConvTransposeOptions<2>, ) -> <B as BackendTypes>::FloatTensorPrimitive

Two dimensional transposed convolution.

§Shapes

x: [batch_size, channels_in, height, width], weight: [channels_in, channels_out, kernel_size_1, kernel_size_2], bias: [channels_out],

Source

fn conv_transpose3d( x: <B as BackendTypes>::FloatTensorPrimitive, weight: <B as BackendTypes>::FloatTensorPrimitive, bias: Option<<B as BackendTypes>::FloatTensorPrimitive>, options: ConvTransposeOptions<3>, ) -> <B as BackendTypes>::FloatTensorPrimitive

Three dimensional transposed convolution.

§Shapes

x: [batch_size, channels_in, depth, height, width], weight: [channels_in, channels_out, kernel_size_1, kernel_size_2, kernel_size_3], bias: [channels_out],

Source

fn avg_pool2d( x: <B as BackendTypes>::FloatTensorPrimitive, kernel_size: [usize; 2], stride: [usize; 2], padding: [usize; 2], count_include_pad: bool, ceil_mode: bool, ) -> <B as BackendTypes>::FloatTensorPrimitive

Two dimensional avg pooling.

§Shapes

x: [batch_size, channels, height, width],

Source

fn avg_pool2d_backward( x: <B as BackendTypes>::FloatTensorPrimitive, grad: <B as BackendTypes>::FloatTensorPrimitive, kernel_size: [usize; 2], stride: [usize; 2], padding: [usize; 2], count_include_pad: bool, ceil_mode: bool, ) -> <B as BackendTypes>::FloatTensorPrimitive

Backward pass for the avg pooling 2d operation.

Source

fn adaptive_avg_pool2d( x: <B as BackendTypes>::FloatTensorPrimitive, output_size: [usize; 2], ) -> <B as BackendTypes>::FloatTensorPrimitive

Two dimensional adaptive avg pooling.

§Shapes

x: [batch_size, channels, height, width],

Source

fn adaptive_avg_pool2d_backward( x: <B as BackendTypes>::FloatTensorPrimitive, grad: <B as BackendTypes>::FloatTensorPrimitive, ) -> <B as BackendTypes>::FloatTensorPrimitive

Backward pass for the adaptive avg pooling 2d operation.

Source

fn max_pool2d( x: <B as BackendTypes>::FloatTensorPrimitive, kernel_size: [usize; 2], stride: [usize; 2], padding: [usize; 2], dilation: [usize; 2], ceil_mode: bool, ) -> <B as BackendTypes>::FloatTensorPrimitive

Two dimensional max pooling.

§Shapes

x: [batch_size, channels, height, width],

Source

fn max_pool2d_with_indices( x: <B as BackendTypes>::FloatTensorPrimitive, kernel_size: [usize; 2], stride: [usize; 2], padding: [usize; 2], dilation: [usize; 2], ceil_mode: bool, ) -> MaxPool2dWithIndices<B>

Two dimensional max pooling with indices.

§Shapes

x: [batch_size, channels, height, width],

Source

fn max_pool2d_with_indices_backward( x: <B as BackendTypes>::FloatTensorPrimitive, kernel_size: [usize; 2], stride: [usize; 2], padding: [usize; 2], dilation: [usize; 2], ceil_mode: bool, output_grad: <B as BackendTypes>::FloatTensorPrimitive, indices: <B as BackendTypes>::IntTensorPrimitive, ) -> MaxPool2dBackward<B>

Backward pass for the max pooling 2d operation.

Source

fn interpolate( x: <B as BackendTypes>::FloatTensorPrimitive, output_size: [usize; 2], options: InterpolateOptions, ) -> <B as BackendTypes>::FloatTensorPrimitive

Down/up samples the input.

§Shapes

x: [batch_size, channels, height, width],

Source

fn interpolate_backward( x: <B as BackendTypes>::FloatTensorPrimitive, grad: <B as BackendTypes>::FloatTensorPrimitive, output_size: [usize; 2], options: InterpolateOptions, ) -> <B as BackendTypes>::FloatTensorPrimitive

Backward pass for the interpolate operation.

Source

fn attention( query: <B as BackendTypes>::FloatTensorPrimitive, key: <B as BackendTypes>::FloatTensorPrimitive, value: <B as BackendTypes>::FloatTensorPrimitive, mask: Option<<B as BackendTypes>::BoolTensorPrimitive>, attn_bias: Option<<B as BackendTypes>::FloatTensorPrimitive>, options: AttentionModuleOptions, ) -> <B as BackendTypes>::FloatTensorPrimitive

Computes scaled dot-product attention: softmax(QKᵗ * scale) · V, where scale defaults to 1/sqrt(head_dim). Optionally applies masking, additive bias, causal masking, and softcap to the attention scores.

§Arguments
  • query: Query tensor of shape [batch_size, num_heads, seq_len_q, head_dim]
  • key: Key tensor of shape [batch_size, num_heads, seq_len_k, head_dim]
  • value: Value tensor of shape [batch_size, num_heads, seq_len_k, val_dim]
  • mask: Optional boolean mask of shape [batch_size, num_heads, seq_len_q, seq_len_k], where true indicates positions to mask (i.e. set to -inf before softmax).
  • attn_bias: Optional float tensor of shape [batch_size, num_heads, seq_len_q, seq_len_k] added to the attention scores before softmax (e.g. ALiBi, relative position biases).
  • options: Additional attention options (custom scale, softcap, causal masking).
§Returns

A tensor of shape [batch_size, num_heads, seq_len_q, val_dim] representing the attended context per head.

§Note

This implementation does not support dropout and is intended for inference or use cases where dropout is not needed.

Source

fn rfft( signal: <B as BackendTypes>::FloatTensorPrimitive, dim: usize, n: Option<usize>, ) -> (<B as BackendTypes>::FloatTensorPrimitive, <B as BackendTypes>::FloatTensorPrimitive)

Real-valued FFT with optional size parameter.

When n is None, the signal must be a power of two along dim, and the output has signal_len / 2 + 1 frequency bins.

When n is Some(size), size must also be a power of two. The signal is truncated or zero-padded to size and the output has size / 2 + 1 frequency bins. Non-power-of-two sizes are currently rejected at the public API boundary; true arbitrary-n DFT support (Bluestein’s algorithm) is tracked as a follow-up.

Returns two tensors: the real part and the imaginary part.

Source

fn irfft( spectrum_re: <B as BackendTypes>::FloatTensorPrimitive, spectrum_im: <B as BackendTypes>::FloatTensorPrimitive, dim: usize, n: Option<usize>, ) -> <B as BackendTypes>::FloatTensorPrimitive

Inverse real-valued FFT with optional output size.

When n is None, the reconstructed signal length 2 * (spectrum_size - 1) must be a power of two.

When n is Some(size), size must also be a power of two. Output has exactly size samples.

Provided Methods§

Source

fn embedding( weights: <B as BackendTypes>::FloatTensorPrimitive, indices: <B as BackendTypes>::IntTensorPrimitive, ) -> <B as BackendTypes>::FloatTensorPrimitive

Embedding operation.

§Arguments
  • weights - The embedding weights.
  • indices - The indices tensor.
§Returns

The output tensor.

Source

fn embedding_backward( weights: <B as BackendTypes>::FloatTensorPrimitive, output_grad: <B as BackendTypes>::FloatTensorPrimitive, indices: <B as BackendTypes>::IntTensorPrimitive, ) -> <B as BackendTypes>::FloatTensorPrimitive

Embedding backward operation.

§Arguments
  • weights - The embedding weights.
  • output_grad - The output gradient.
  • indices - The indices tensor.
§Returns

The gradient.

Source

fn linear( x: <B as BackendTypes>::FloatTensorPrimitive, weight: <B as BackendTypes>::FloatTensorPrimitive, bias: Option<<B as BackendTypes>::FloatTensorPrimitive>, ) -> <B as BackendTypes>::FloatTensorPrimitive

Linear transformation.

§Shapes

x: [..., d_input], weight: [d_input, d_output], bias: [d_output],

Source

fn linear_x_backward( weight: <B as BackendTypes>::FloatTensorPrimitive, output_grad: <B as BackendTypes>::FloatTensorPrimitive, ) -> <B as BackendTypes>::FloatTensorPrimitive

Backward pass for linear, returning the gradient for x.

Source

fn linear_weight_backward( x: <B as BackendTypes>::FloatTensorPrimitive, output_grad: <B as BackendTypes>::FloatTensorPrimitive, ) -> <B as BackendTypes>::FloatTensorPrimitive

Backward pass for linear, returning the gradient for weight.

Source

fn linear_bias_backward( output_grad: <B as BackendTypes>::FloatTensorPrimitive, ) -> <B as BackendTypes>::FloatTensorPrimitive

Backward pass for linear, returning the gradient for bias.

Source

fn conv1d( x: <B as BackendTypes>::FloatTensorPrimitive, weight: <B as BackendTypes>::FloatTensorPrimitive, bias: Option<<B as BackendTypes>::FloatTensorPrimitive>, options: ConvOptions<1>, ) -> <B as BackendTypes>::FloatTensorPrimitive

One dimensional convolution.

§Shapes

x: [batch_size, channels_in, length], weight: [channels_out, channels_in, kernel_size], bias: [channels_out],

Source

fn conv1d_x_backward( x: <B as BackendTypes>::FloatTensorPrimitive, weight: <B as BackendTypes>::FloatTensorPrimitive, output_grad: <B as BackendTypes>::FloatTensorPrimitive, options: ConvOptions<1>, ) -> <B as BackendTypes>::FloatTensorPrimitive

Backward pass for the conv1d operation, returning the gradient for x.

Source

fn conv1d_weight_backward( x: <B as BackendTypes>::FloatTensorPrimitive, weight: <B as BackendTypes>::FloatTensorPrimitive, output_grad: <B as BackendTypes>::FloatTensorPrimitive, options: ConvOptions<1>, ) -> <B as BackendTypes>::FloatTensorPrimitive

Backward pass for the conv1d operation, returning the gradient for weight.

Source

fn conv1d_bias_backward( x: <B as BackendTypes>::FloatTensorPrimitive, bias: <B as BackendTypes>::FloatTensorPrimitive, output_grad: <B as BackendTypes>::FloatTensorPrimitive, ) -> <B as BackendTypes>::FloatTensorPrimitive

Backward pass for the conv1d operation, returning the gradient for bias.

Source

fn conv2d_x_backward( x: <B as BackendTypes>::FloatTensorPrimitive, weight: <B as BackendTypes>::FloatTensorPrimitive, output_grad: <B as BackendTypes>::FloatTensorPrimitive, options: ConvOptions<2>, ) -> <B as BackendTypes>::FloatTensorPrimitive

Backward pass for the conv2d operation, returning the gradient for x.

Source

fn conv2d_weight_backward( x: <B as BackendTypes>::FloatTensorPrimitive, weight: <B as BackendTypes>::FloatTensorPrimitive, output_grad: <B as BackendTypes>::FloatTensorPrimitive, options: ConvOptions<2>, ) -> <B as BackendTypes>::FloatTensorPrimitive

Backward pass for the conv2d operation, returning the gradient for weight.

Source

fn conv2d_bias_backward( x: <B as BackendTypes>::FloatTensorPrimitive, bias: <B as BackendTypes>::FloatTensorPrimitive, output_grad: <B as BackendTypes>::FloatTensorPrimitive, ) -> <B as BackendTypes>::FloatTensorPrimitive

Backward pass for the conv2d operation, returning the gradient for bias.

Source

fn conv3d_x_backward( x: <B as BackendTypes>::FloatTensorPrimitive, weight: <B as BackendTypes>::FloatTensorPrimitive, output_grad: <B as BackendTypes>::FloatTensorPrimitive, options: ConvOptions<3>, ) -> <B as BackendTypes>::FloatTensorPrimitive

Backward pass for the conv3d operation, returning the gradient for x.

Source

fn conv3d_weight_backward( x: <B as BackendTypes>::FloatTensorPrimitive, weight: <B as BackendTypes>::FloatTensorPrimitive, output_grad: <B as BackendTypes>::FloatTensorPrimitive, options: ConvOptions<3>, ) -> <B as BackendTypes>::FloatTensorPrimitive

Backward pass for the conv3d operation, returning the gradient for weight.

Source

fn conv3d_bias_backward( x: <B as BackendTypes>::FloatTensorPrimitive, bias: <B as BackendTypes>::FloatTensorPrimitive, output_grad: <B as BackendTypes>::FloatTensorPrimitive, ) -> <B as BackendTypes>::FloatTensorPrimitive

Backward pass for the conv3d operation, returning the gradient for bias.

Source

fn conv_transpose1d( x: <B as BackendTypes>::FloatTensorPrimitive, weight: <B as BackendTypes>::FloatTensorPrimitive, bias: Option<<B as BackendTypes>::FloatTensorPrimitive>, options: ConvTransposeOptions<1>, ) -> <B as BackendTypes>::FloatTensorPrimitive

One dimensional transposed convolution.

§Shapes

x: [batch_size, channels_in, length], weight: [channels_in, channels_out, kernel_size], bias: [channels_out],

Source

fn conv_transpose1d_x_backward( weight: <B as BackendTypes>::FloatTensorPrimitive, output_grad: <B as BackendTypes>::FloatTensorPrimitive, options: ConvTransposeOptions<1>, ) -> <B as BackendTypes>::FloatTensorPrimitive

Backward pass for the conv transpose 1d operation, returning the gradient for x.

Source

fn conv_transpose1d_weight_backward( x: <B as BackendTypes>::FloatTensorPrimitive, weight: <B as BackendTypes>::FloatTensorPrimitive, output_grad: <B as BackendTypes>::FloatTensorPrimitive, options: ConvTransposeOptions<1>, ) -> <B as BackendTypes>::FloatTensorPrimitive

Backward pass for the conv transpose 1d operation, returning the gradient for weight.

Source

fn conv_transpose1d_bias_backward( x: <B as BackendTypes>::FloatTensorPrimitive, bias: <B as BackendTypes>::FloatTensorPrimitive, output_grad: <B as BackendTypes>::FloatTensorPrimitive, ) -> <B as BackendTypes>::FloatTensorPrimitive

Backward pass for the conv transpose 1d operation, returning the gradient for bias.

Source

fn conv_transpose2d_x_backward( weight: <B as BackendTypes>::FloatTensorPrimitive, output_grad: <B as BackendTypes>::FloatTensorPrimitive, options: ConvTransposeOptions<2>, ) -> <B as BackendTypes>::FloatTensorPrimitive

Backward pass for the conv transpose 2d operation, returning the gradient for x.

Source

fn conv_transpose2d_weight_backward( x: <B as BackendTypes>::FloatTensorPrimitive, weight: <B as BackendTypes>::FloatTensorPrimitive, output_grad: <B as BackendTypes>::FloatTensorPrimitive, options: ConvTransposeOptions<2>, ) -> <B as BackendTypes>::FloatTensorPrimitive

Backward pass for the conv transpose 2d operation, returning the gradient for weight.

Source

fn conv_transpose2d_bias_backward( x: <B as BackendTypes>::FloatTensorPrimitive, bias: <B as BackendTypes>::FloatTensorPrimitive, output_grad: <B as BackendTypes>::FloatTensorPrimitive, ) -> <B as BackendTypes>::FloatTensorPrimitive

Backward pass for the conv transpose 2d operation, returning the gradient for bias.

Source

fn conv_transpose3d_x_backward( weight: <B as BackendTypes>::FloatTensorPrimitive, output_grad: <B as BackendTypes>::FloatTensorPrimitive, options: ConvTransposeOptions<3>, ) -> <B as BackendTypes>::FloatTensorPrimitive

Backward pass for the conv transpose 3d operation, returning the gradient for x.

Source

fn conv_transpose3d_weight_backward( x: <B as BackendTypes>::FloatTensorPrimitive, weight: <B as BackendTypes>::FloatTensorPrimitive, output_grad: <B as BackendTypes>::FloatTensorPrimitive, options: ConvTransposeOptions<3>, ) -> <B as BackendTypes>::FloatTensorPrimitive

Backward pass for the conv transpose 3d operation, returning the gradient for weight.

Source

fn conv_transpose3d_bias_backward( x: <B as BackendTypes>::FloatTensorPrimitive, bias: <B as BackendTypes>::FloatTensorPrimitive, output_grad: <B as BackendTypes>::FloatTensorPrimitive, ) -> <B as BackendTypes>::FloatTensorPrimitive

Backward pass for the conv transpose 3d operation, returning the gradient for bias.

Source

fn unfold4d( x: <B as BackendTypes>::FloatTensorPrimitive, kernel_size: [usize; 2], options: UnfoldOptions, ) -> <B as BackendTypes>::FloatTensorPrimitive

Four-dimensional unfolding.

§Shapes
  • x: [batch_size, channels_in, height, width],
  • returns: [batch_size, channels_in * kernel_size_1 * kernel_size_2, number of blocks],
Source

fn avg_pool1d( x: <B as BackendTypes>::FloatTensorPrimitive, kernel_size: usize, stride: usize, padding: usize, count_include_pad: bool, ceil_mode: bool, ) -> <B as BackendTypes>::FloatTensorPrimitive

One dimensional avg pooling.

§Shapes

x: [batch_size, channels, length],

Source

fn avg_pool1d_backward( x: <B as BackendTypes>::FloatTensorPrimitive, grad: <B as BackendTypes>::FloatTensorPrimitive, kernel_size: usize, stride: usize, padding: usize, count_include_pad: bool, ceil_mode: bool, ) -> <B as BackendTypes>::FloatTensorPrimitive

Backward pass for the avg pooling 1d operation.

Source

fn adaptive_avg_pool1d( x: <B as BackendTypes>::FloatTensorPrimitive, output_size: usize, ) -> <B as BackendTypes>::FloatTensorPrimitive

One dimensional adaptive avg pooling.

§Shapes

x: [batch_size, channels, length],

Source

fn adaptive_avg_pool1d_backward( x: <B as BackendTypes>::FloatTensorPrimitive, grad: <B as BackendTypes>::FloatTensorPrimitive, ) -> <B as BackendTypes>::FloatTensorPrimitive

Backward pass for the adaptive avg pooling 1d operation.

Source

fn max_pool1d( x: <B as BackendTypes>::FloatTensorPrimitive, kernel_size: usize, stride: usize, padding: usize, dilation: usize, ceil_mode: bool, ) -> <B as BackendTypes>::FloatTensorPrimitive

One dimensional max pooling.

§Shapes

x: [batch_size, channels, length],

Source

fn max_pool1d_with_indices( x: <B as BackendTypes>::FloatTensorPrimitive, kernel_size: usize, stride: usize, padding: usize, dilation: usize, ceil_mode: bool, ) -> MaxPool1dWithIndices<B>

One dimensional max pooling with indices.

§Shapes

x: [batch_size, channels, length],

Source

fn max_pool1d_with_indices_backward( x: <B as BackendTypes>::FloatTensorPrimitive, kernel_size: usize, stride: usize, padding: usize, dilation: usize, ceil_mode: bool, output_grad: <B as BackendTypes>::FloatTensorPrimitive, indices: <B as BackendTypes>::IntTensorPrimitive, ) -> MaxPool1dBackward<B>

Backward pass for the max pooling 1d operation.

Source

fn layer_norm( tensor: <B as BackendTypes>::FloatTensorPrimitive, gamma: <B as BackendTypes>::FloatTensorPrimitive, beta: Option<<B as BackendTypes>::FloatTensorPrimitive>, epsilon: f64, ) -> <B as BackendTypes>::FloatTensorPrimitive

Applies Layer Normalization over the last dimension of the input tensor.

Computes (x - mean) / sqrt(var + epsilon) * gamma + beta, where mean and (biased) var are reduced over the last axis.

§Arguments
  • tensor - Input tensor of shape [..., d_model].
  • gamma - Scale tensor of shape [d_model].
  • beta - Optional bias tensor of shape [d_model].
  • epsilon - Numerical stability term added to the variance before the square root.
§Returns

A tensor with the same shape as tensor.

Source

fn ctc_loss( log_probs: <B as BackendTypes>::FloatTensorPrimitive, targets: <B as BackendTypes>::IntTensorPrimitive, input_lengths: <B as BackendTypes>::IntTensorPrimitive, target_lengths: <B as BackendTypes>::IntTensorPrimitive, blank: usize, ) -> <B as BackendTypes>::FloatTensorPrimitive

Computes the Connectionist Temporal Classification (CTC) loss.

Sums over all valid alignments between the input and target sequences using the forward (alpha) algorithm.

§Arguments
  • log_probs - Log-probabilities of shape [T, N, C]
  • targets - Target label indices of shape [N, S]
  • input_lengths - Actual input sequence lengths per batch element [N]
  • target_lengths - Actual target lengths per batch element [N]
  • blank - Index of the blank label
§Returns

Per-sample loss of shape [N]

Source

fn has_ctc_loss_backward() -> bool

Returns true if this backend implements ctc_loss_backward natively.

Autodiff queries this flag to decide between two paths: calling the backend’s native ctc_loss_backward, or computing the gradient automatically against the decomposed ctc::ctc_loss_default forward.

Backends that override ctc_loss_backward must also override this to return true.

Source

fn ctc_loss_backward( _log_probs: <B as BackendTypes>::FloatTensorPrimitive, _targets: <B as BackendTypes>::IntTensorPrimitive, _input_lengths: <B as BackendTypes>::IntTensorPrimitive, _target_lengths: <B as BackendTypes>::IntTensorPrimitive, _grad_loss: <B as BackendTypes>::FloatTensorPrimitive, _blank: usize, ) -> <B as BackendTypes>::FloatTensorPrimitive

Backward pass for ctc_loss: gradient w.r.t. log_probs.

Only called when has_ctc_loss_backward returns true. Backends without a native implementation should leave both methods at their defaults; the gradient is computed automatically by autodiff against the decomposed ctc::ctc_loss_default forward.

§Arguments
  • log_probs - Log-probabilities of shape [T, N, C]
  • targets - Target label indices of shape [N, S]
  • input_lengths - Actual input sequence lengths per batch element [N]
  • target_lengths - Actual target lengths per batch element [N]
  • grad_loss - Upstream gradient w.r.t. the per-sample loss [N]
  • blank - Index of the blank label
§Returns

Gradient w.r.t. log_probs of shape [T, N, C]

Dyn Compatibility§

This trait is not dyn compatible.

In older versions of Rust, dyn compatibility was called "object safety", so this trait is not object safe.

Implementors§