pub trait ModuleOps<B: Backend> {
Show 56 methods
// Required methods
fn conv2d(
x: FloatTensor<B>,
weight: FloatTensor<B>,
bias: Option<FloatTensor<B>>,
options: ConvOptions<2>,
) -> FloatTensor<B>;
fn deform_conv2d(
x: FloatTensor<B>,
offset: FloatTensor<B>,
weight: FloatTensor<B>,
mask: Option<FloatTensor<B>>,
bias: Option<FloatTensor<B>>,
options: DeformConvOptions<2>,
) -> FloatTensor<B>;
fn deform_conv2d_backward(
x: FloatTensor<B>,
offset: FloatTensor<B>,
weight: FloatTensor<B>,
mask: Option<FloatTensor<B>>,
bias: Option<FloatTensor<B>>,
output_grad: FloatTensor<B>,
options: DeformConvOptions<2>,
) -> DeformConv2dBackward<B>;
fn conv3d(
x: FloatTensor<B>,
weight: FloatTensor<B>,
bias: Option<FloatTensor<B>>,
options: ConvOptions<3>,
) -> FloatTensor<B>;
fn conv_transpose2d(
x: FloatTensor<B>,
weight: FloatTensor<B>,
bias: Option<FloatTensor<B>>,
options: ConvTransposeOptions<2>,
) -> FloatTensor<B>;
fn conv_transpose3d(
x: FloatTensor<B>,
weight: FloatTensor<B>,
bias: Option<FloatTensor<B>>,
options: ConvTransposeOptions<3>,
) -> FloatTensor<B>;
fn avg_pool2d(
x: FloatTensor<B>,
kernel_size: [usize; 2],
stride: [usize; 2],
padding: [usize; 2],
count_include_pad: bool,
ceil_mode: bool,
) -> FloatTensor<B>;
fn avg_pool2d_backward(
x: FloatTensor<B>,
grad: FloatTensor<B>,
kernel_size: [usize; 2],
stride: [usize; 2],
padding: [usize; 2],
count_include_pad: bool,
ceil_mode: bool,
) -> FloatTensor<B>;
fn adaptive_avg_pool2d(
x: FloatTensor<B>,
output_size: [usize; 2],
) -> FloatTensor<B>;
fn adaptive_avg_pool2d_backward(
x: FloatTensor<B>,
grad: FloatTensor<B>,
) -> FloatTensor<B>;
fn max_pool2d(
x: FloatTensor<B>,
kernel_size: [usize; 2],
stride: [usize; 2],
padding: [usize; 2],
dilation: [usize; 2],
ceil_mode: bool,
) -> FloatTensor<B>;
fn max_pool2d_with_indices(
x: FloatTensor<B>,
kernel_size: [usize; 2],
stride: [usize; 2],
padding: [usize; 2],
dilation: [usize; 2],
ceil_mode: bool,
) -> MaxPool2dWithIndices<B>;
fn max_pool2d_with_indices_backward(
x: FloatTensor<B>,
kernel_size: [usize; 2],
stride: [usize; 2],
padding: [usize; 2],
dilation: [usize; 2],
ceil_mode: bool,
output_grad: FloatTensor<B>,
indices: IntTensor<B>,
) -> MaxPool2dBackward<B>;
fn interpolate(
x: FloatTensor<B>,
output_size: [usize; 2],
options: InterpolateOptions,
) -> FloatTensor<B>;
fn interpolate_backward(
x: FloatTensor<B>,
grad: FloatTensor<B>,
output_size: [usize; 2],
options: InterpolateOptions,
) -> FloatTensor<B>;
fn attention(
query: FloatTensor<B>,
key: FloatTensor<B>,
value: FloatTensor<B>,
mask: Option<BoolTensor<B>>,
attn_bias: Option<FloatTensor<B>>,
options: AttentionModuleOptions,
) -> FloatTensor<B>;
fn rfft(
signal: FloatTensor<B>,
dim: usize,
n: Option<usize>,
) -> (FloatTensor<B>, FloatTensor<B>);
fn irfft(
spectrum_re: FloatTensor<B>,
spectrum_im: FloatTensor<B>,
dim: usize,
n: Option<usize>,
) -> FloatTensor<B>;
// Provided methods
fn embedding(
weights: FloatTensor<B>,
indices: IntTensor<B>,
) -> FloatTensor<B> { ... }
fn embedding_backward(
weights: FloatTensor<B>,
output_grad: FloatTensor<B>,
indices: IntTensor<B>,
) -> FloatTensor<B> { ... }
fn linear(
x: FloatTensor<B>,
weight: FloatTensor<B>,
bias: Option<FloatTensor<B>>,
) -> FloatTensor<B> { ... }
fn linear_x_backward(
weight: FloatTensor<B>,
output_grad: FloatTensor<B>,
) -> FloatTensor<B> { ... }
fn linear_weight_backward(
x: FloatTensor<B>,
output_grad: FloatTensor<B>,
) -> FloatTensor<B> { ... }
fn linear_bias_backward(output_grad: FloatTensor<B>) -> FloatTensor<B> { ... }
fn conv1d(
x: FloatTensor<B>,
weight: FloatTensor<B>,
bias: Option<FloatTensor<B>>,
options: ConvOptions<1>,
) -> FloatTensor<B> { ... }
fn conv1d_x_backward(
x: FloatTensor<B>,
weight: FloatTensor<B>,
output_grad: FloatTensor<B>,
options: ConvOptions<1>,
) -> FloatTensor<B> { ... }
fn conv1d_weight_backward(
x: FloatTensor<B>,
weight: FloatTensor<B>,
output_grad: FloatTensor<B>,
options: ConvOptions<1>,
) -> FloatTensor<B> { ... }
fn conv1d_bias_backward(
x: FloatTensor<B>,
bias: FloatTensor<B>,
output_grad: FloatTensor<B>,
) -> FloatTensor<B> { ... }
fn conv2d_x_backward(
x: FloatTensor<B>,
weight: FloatTensor<B>,
output_grad: FloatTensor<B>,
options: ConvOptions<2>,
) -> FloatTensor<B> { ... }
fn conv2d_weight_backward(
x: FloatTensor<B>,
weight: FloatTensor<B>,
output_grad: FloatTensor<B>,
options: ConvOptions<2>,
) -> FloatTensor<B> { ... }
fn conv2d_bias_backward(
x: FloatTensor<B>,
bias: FloatTensor<B>,
output_grad: FloatTensor<B>,
) -> FloatTensor<B> { ... }
fn conv3d_x_backward(
x: FloatTensor<B>,
weight: FloatTensor<B>,
output_grad: FloatTensor<B>,
options: ConvOptions<3>,
) -> FloatTensor<B> { ... }
fn conv3d_weight_backward(
x: FloatTensor<B>,
weight: FloatTensor<B>,
output_grad: FloatTensor<B>,
options: ConvOptions<3>,
) -> FloatTensor<B> { ... }
fn conv3d_bias_backward(
x: FloatTensor<B>,
bias: FloatTensor<B>,
output_grad: FloatTensor<B>,
) -> FloatTensor<B> { ... }
fn conv_transpose1d(
x: FloatTensor<B>,
weight: FloatTensor<B>,
bias: Option<FloatTensor<B>>,
options: ConvTransposeOptions<1>,
) -> FloatTensor<B> { ... }
fn conv_transpose1d_x_backward(
weight: FloatTensor<B>,
output_grad: FloatTensor<B>,
options: ConvTransposeOptions<1>,
) -> FloatTensor<B> { ... }
fn conv_transpose1d_weight_backward(
x: FloatTensor<B>,
weight: FloatTensor<B>,
output_grad: FloatTensor<B>,
options: ConvTransposeOptions<1>,
) -> FloatTensor<B> { ... }
fn conv_transpose1d_bias_backward(
x: FloatTensor<B>,
bias: FloatTensor<B>,
output_grad: FloatTensor<B>,
) -> FloatTensor<B> { ... }
fn conv_transpose2d_x_backward(
weight: FloatTensor<B>,
output_grad: FloatTensor<B>,
options: ConvTransposeOptions<2>,
) -> FloatTensor<B> { ... }
fn conv_transpose2d_weight_backward(
x: FloatTensor<B>,
weight: FloatTensor<B>,
output_grad: FloatTensor<B>,
options: ConvTransposeOptions<2>,
) -> FloatTensor<B> { ... }
fn conv_transpose2d_bias_backward(
x: FloatTensor<B>,
bias: FloatTensor<B>,
output_grad: FloatTensor<B>,
) -> FloatTensor<B> { ... }
fn conv_transpose3d_x_backward(
weight: FloatTensor<B>,
output_grad: FloatTensor<B>,
options: ConvTransposeOptions<3>,
) -> FloatTensor<B> { ... }
fn conv_transpose3d_weight_backward(
x: FloatTensor<B>,
weight: FloatTensor<B>,
output_grad: FloatTensor<B>,
options: ConvTransposeOptions<3>,
) -> FloatTensor<B> { ... }
fn conv_transpose3d_bias_backward(
x: FloatTensor<B>,
bias: FloatTensor<B>,
output_grad: FloatTensor<B>,
) -> FloatTensor<B> { ... }
fn unfold4d(
x: FloatTensor<B>,
kernel_size: [usize; 2],
options: UnfoldOptions,
) -> FloatTensor<B> { ... }
fn avg_pool1d(
x: FloatTensor<B>,
kernel_size: usize,
stride: usize,
padding: usize,
count_include_pad: bool,
ceil_mode: bool,
) -> FloatTensor<B> { ... }
fn avg_pool1d_backward(
x: FloatTensor<B>,
grad: FloatTensor<B>,
kernel_size: usize,
stride: usize,
padding: usize,
count_include_pad: bool,
ceil_mode: bool,
) -> FloatTensor<B> { ... }
fn adaptive_avg_pool1d(
x: FloatTensor<B>,
output_size: usize,
) -> FloatTensor<B> { ... }
fn adaptive_avg_pool1d_backward(
x: FloatTensor<B>,
grad: FloatTensor<B>,
) -> FloatTensor<B> { ... }
fn max_pool1d(
x: FloatTensor<B>,
kernel_size: usize,
stride: usize,
padding: usize,
dilation: usize,
ceil_mode: bool,
) -> FloatTensor<B> { ... }
fn max_pool1d_with_indices(
x: FloatTensor<B>,
kernel_size: usize,
stride: usize,
padding: usize,
dilation: usize,
ceil_mode: bool,
) -> MaxPool1dWithIndices<B> { ... }
fn max_pool1d_with_indices_backward(
x: FloatTensor<B>,
kernel_size: usize,
stride: usize,
padding: usize,
dilation: usize,
ceil_mode: bool,
output_grad: FloatTensor<B>,
indices: IntTensor<B>,
) -> MaxPool1dBackward<B> { ... }
fn layer_norm(
tensor: FloatTensor<B>,
gamma: FloatTensor<B>,
beta: Option<FloatTensor<B>>,
epsilon: f64,
) -> FloatTensor<B> { ... }
fn ctc_loss(
log_probs: FloatTensor<B>,
targets: IntTensor<B>,
input_lengths: IntTensor<B>,
target_lengths: IntTensor<B>,
blank: usize,
) -> FloatTensor<B> { ... }
fn has_ctc_loss_backward() -> bool { ... }
fn ctc_loss_backward(
_log_probs: FloatTensor<B>,
_targets: IntTensor<B>,
_input_lengths: IntTensor<B>,
_target_lengths: IntTensor<B>,
_grad_loss: FloatTensor<B>,
_blank: usize,
) -> FloatTensor<B> { ... }
}Expand description
Module operations trait.
Required Methods§
Sourcefn conv2d(
x: FloatTensor<B>,
weight: FloatTensor<B>,
bias: Option<FloatTensor<B>>,
options: ConvOptions<2>,
) -> FloatTensor<B>
fn conv2d( x: FloatTensor<B>, weight: FloatTensor<B>, bias: Option<FloatTensor<B>>, options: ConvOptions<2>, ) -> FloatTensor<B>
Two dimensional convolution.
§Shapes
x: [batch_size, channels_in, height, width],
weight: [channels_out, channels_in, kernel_size_1, kernel_size_2],
bias: [channels_out],
Sourcefn deform_conv2d(
x: FloatTensor<B>,
offset: FloatTensor<B>,
weight: FloatTensor<B>,
mask: Option<FloatTensor<B>>,
bias: Option<FloatTensor<B>>,
options: DeformConvOptions<2>,
) -> FloatTensor<B>
fn deform_conv2d( x: FloatTensor<B>, offset: FloatTensor<B>, weight: FloatTensor<B>, mask: Option<FloatTensor<B>>, bias: Option<FloatTensor<B>>, options: DeformConvOptions<2>, ) -> FloatTensor<B>
Two dimensional deformable convolution.
§Shapes
x: [batch_size, channels_in, height, width],
weight: [channels_out, channels_in, kernel_size_1, kernel_size_2],
bias: [channels_out],
Sourcefn deform_conv2d_backward(
x: FloatTensor<B>,
offset: FloatTensor<B>,
weight: FloatTensor<B>,
mask: Option<FloatTensor<B>>,
bias: Option<FloatTensor<B>>,
output_grad: FloatTensor<B>,
options: DeformConvOptions<2>,
) -> DeformConv2dBackward<B>
fn deform_conv2d_backward( x: FloatTensor<B>, offset: FloatTensor<B>, weight: FloatTensor<B>, mask: Option<FloatTensor<B>>, bias: Option<FloatTensor<B>>, output_grad: FloatTensor<B>, options: DeformConvOptions<2>, ) -> DeformConv2dBackward<B>
Backward pass for the deform_conv2d operation.
Sourcefn conv3d(
x: FloatTensor<B>,
weight: FloatTensor<B>,
bias: Option<FloatTensor<B>>,
options: ConvOptions<3>,
) -> FloatTensor<B>
fn conv3d( x: FloatTensor<B>, weight: FloatTensor<B>, bias: Option<FloatTensor<B>>, options: ConvOptions<3>, ) -> FloatTensor<B>
Three dimensional convolution.
§Shapes
x: [batch_size, channels_in, depth, height, width],
weight: [channels_out, channels_in, kernel_size_1, kernel_size_2, kernel_size_3],
bias: [channels_out],
Sourcefn conv_transpose2d(
x: FloatTensor<B>,
weight: FloatTensor<B>,
bias: Option<FloatTensor<B>>,
options: ConvTransposeOptions<2>,
) -> FloatTensor<B>
fn conv_transpose2d( x: FloatTensor<B>, weight: FloatTensor<B>, bias: Option<FloatTensor<B>>, options: ConvTransposeOptions<2>, ) -> FloatTensor<B>
Two dimensional transposed convolution.
§Shapes
x: [batch_size, channels_in, height, width],
weight: [channels_in, channels_out, kernel_size_1, kernel_size_2],
bias: [channels_out],
Sourcefn conv_transpose3d(
x: FloatTensor<B>,
weight: FloatTensor<B>,
bias: Option<FloatTensor<B>>,
options: ConvTransposeOptions<3>,
) -> FloatTensor<B>
fn conv_transpose3d( x: FloatTensor<B>, weight: FloatTensor<B>, bias: Option<FloatTensor<B>>, options: ConvTransposeOptions<3>, ) -> FloatTensor<B>
Three dimensional transposed convolution.
§Shapes
x: [batch_size, channels_in, depth, height, width],
weight: [channels_in, channels_out, kernel_size_1, kernel_size_2, kernel_size_3],
bias: [channels_out],
Sourcefn avg_pool2d(
x: FloatTensor<B>,
kernel_size: [usize; 2],
stride: [usize; 2],
padding: [usize; 2],
count_include_pad: bool,
ceil_mode: bool,
) -> FloatTensor<B>
fn avg_pool2d( x: FloatTensor<B>, kernel_size: [usize; 2], stride: [usize; 2], padding: [usize; 2], count_include_pad: bool, ceil_mode: bool, ) -> FloatTensor<B>
Sourcefn avg_pool2d_backward(
x: FloatTensor<B>,
grad: FloatTensor<B>,
kernel_size: [usize; 2],
stride: [usize; 2],
padding: [usize; 2],
count_include_pad: bool,
ceil_mode: bool,
) -> FloatTensor<B>
fn avg_pool2d_backward( x: FloatTensor<B>, grad: FloatTensor<B>, kernel_size: [usize; 2], stride: [usize; 2], padding: [usize; 2], count_include_pad: bool, ceil_mode: bool, ) -> FloatTensor<B>
Backward pass for the avg pooling 2d operation.
Sourcefn adaptive_avg_pool2d(
x: FloatTensor<B>,
output_size: [usize; 2],
) -> FloatTensor<B>
fn adaptive_avg_pool2d( x: FloatTensor<B>, output_size: [usize; 2], ) -> FloatTensor<B>
Sourcefn adaptive_avg_pool2d_backward(
x: FloatTensor<B>,
grad: FloatTensor<B>,
) -> FloatTensor<B>
fn adaptive_avg_pool2d_backward( x: FloatTensor<B>, grad: FloatTensor<B>, ) -> FloatTensor<B>
Backward pass for the adaptive avg pooling 2d operation.
Sourcefn max_pool2d(
x: FloatTensor<B>,
kernel_size: [usize; 2],
stride: [usize; 2],
padding: [usize; 2],
dilation: [usize; 2],
ceil_mode: bool,
) -> FloatTensor<B>
fn max_pool2d( x: FloatTensor<B>, kernel_size: [usize; 2], stride: [usize; 2], padding: [usize; 2], dilation: [usize; 2], ceil_mode: bool, ) -> FloatTensor<B>
Sourcefn max_pool2d_with_indices(
x: FloatTensor<B>,
kernel_size: [usize; 2],
stride: [usize; 2],
padding: [usize; 2],
dilation: [usize; 2],
ceil_mode: bool,
) -> MaxPool2dWithIndices<B>
fn max_pool2d_with_indices( x: FloatTensor<B>, kernel_size: [usize; 2], stride: [usize; 2], padding: [usize; 2], dilation: [usize; 2], ceil_mode: bool, ) -> MaxPool2dWithIndices<B>
Sourcefn max_pool2d_with_indices_backward(
x: FloatTensor<B>,
kernel_size: [usize; 2],
stride: [usize; 2],
padding: [usize; 2],
dilation: [usize; 2],
ceil_mode: bool,
output_grad: FloatTensor<B>,
indices: IntTensor<B>,
) -> MaxPool2dBackward<B>
fn max_pool2d_with_indices_backward( x: FloatTensor<B>, kernel_size: [usize; 2], stride: [usize; 2], padding: [usize; 2], dilation: [usize; 2], ceil_mode: bool, output_grad: FloatTensor<B>, indices: IntTensor<B>, ) -> MaxPool2dBackward<B>
Backward pass for the max pooling 2d operation.
Sourcefn interpolate(
x: FloatTensor<B>,
output_size: [usize; 2],
options: InterpolateOptions,
) -> FloatTensor<B>
fn interpolate( x: FloatTensor<B>, output_size: [usize; 2], options: InterpolateOptions, ) -> FloatTensor<B>
Sourcefn interpolate_backward(
x: FloatTensor<B>,
grad: FloatTensor<B>,
output_size: [usize; 2],
options: InterpolateOptions,
) -> FloatTensor<B>
fn interpolate_backward( x: FloatTensor<B>, grad: FloatTensor<B>, output_size: [usize; 2], options: InterpolateOptions, ) -> FloatTensor<B>
Backward pass for the interpolate operation.
Sourcefn attention(
query: FloatTensor<B>,
key: FloatTensor<B>,
value: FloatTensor<B>,
mask: Option<BoolTensor<B>>,
attn_bias: Option<FloatTensor<B>>,
options: AttentionModuleOptions,
) -> FloatTensor<B>
fn attention( query: FloatTensor<B>, key: FloatTensor<B>, value: FloatTensor<B>, mask: Option<BoolTensor<B>>, attn_bias: Option<FloatTensor<B>>, options: AttentionModuleOptions, ) -> FloatTensor<B>
Computes scaled dot-product attention: softmax(QKᵗ * scale) · V, where scale defaults to 1/sqrt(head_dim). Optionally applies masking, additive bias, causal masking, and softcap to the attention scores.
§Arguments
- query: Query tensor of shape [batch_size, num_heads, seq_len_q, head_dim]
- key: Key tensor of shape [batch_size, num_heads, seq_len_k, head_dim]
- value: Value tensor of shape [batch_size, num_heads, seq_len_k, val_dim]
- mask: Optional boolean mask of shape [batch_size, num_heads, seq_len_q, seq_len_k], where true indicates positions to mask (i.e. set to -inf before softmax).
- attn_bias: Optional float tensor of shape [batch_size, num_heads, seq_len_q, seq_len_k] added to the attention scores before softmax (e.g. ALiBi, relative position biases).
- options: Additional attention options (custom scale, softcap, causal masking).
§Returns
A tensor of shape [batch_size, num_heads, seq_len_q, val_dim]
representing the attended context per head.
§Note
This implementation does not support dropout and is intended for inference or use cases where dropout is not needed.
Sourcefn rfft(
signal: FloatTensor<B>,
dim: usize,
n: Option<usize>,
) -> (FloatTensor<B>, FloatTensor<B>)
fn rfft( signal: FloatTensor<B>, dim: usize, n: Option<usize>, ) -> (FloatTensor<B>, FloatTensor<B>)
Real-valued FFT with optional size parameter.
When n is None, the signal must be a power of two along dim, and the output has
signal_len / 2 + 1 frequency bins.
When n is Some(size), size must also be a power of two. The signal is truncated
or zero-padded to size and the output has size / 2 + 1 frequency bins. Non-power-
of-two sizes are currently rejected at the public API boundary; true arbitrary-n DFT
support (Bluestein’s algorithm) is tracked as a follow-up.
Returns two tensors: the real part and the imaginary part.
Sourcefn irfft(
spectrum_re: FloatTensor<B>,
spectrum_im: FloatTensor<B>,
dim: usize,
n: Option<usize>,
) -> FloatTensor<B>
fn irfft( spectrum_re: FloatTensor<B>, spectrum_im: FloatTensor<B>, dim: usize, n: Option<usize>, ) -> FloatTensor<B>
Inverse real-valued FFT with optional output size.
When n is None, the reconstructed signal length 2 * (spectrum_size - 1) must be
a power of two.
When n is Some(size), size must also be a power of two. Output has exactly
size samples.
Provided Methods§
Sourcefn embedding(weights: FloatTensor<B>, indices: IntTensor<B>) -> FloatTensor<B>
fn embedding(weights: FloatTensor<B>, indices: IntTensor<B>) -> FloatTensor<B>
Sourcefn embedding_backward(
weights: FloatTensor<B>,
output_grad: FloatTensor<B>,
indices: IntTensor<B>,
) -> FloatTensor<B>
fn embedding_backward( weights: FloatTensor<B>, output_grad: FloatTensor<B>, indices: IntTensor<B>, ) -> FloatTensor<B>
Sourcefn linear(
x: FloatTensor<B>,
weight: FloatTensor<B>,
bias: Option<FloatTensor<B>>,
) -> FloatTensor<B>
fn linear( x: FloatTensor<B>, weight: FloatTensor<B>, bias: Option<FloatTensor<B>>, ) -> FloatTensor<B>
Sourcefn linear_x_backward(
weight: FloatTensor<B>,
output_grad: FloatTensor<B>,
) -> FloatTensor<B>
fn linear_x_backward( weight: FloatTensor<B>, output_grad: FloatTensor<B>, ) -> FloatTensor<B>
Backward pass for linear, returning the gradient for x.
Sourcefn linear_weight_backward(
x: FloatTensor<B>,
output_grad: FloatTensor<B>,
) -> FloatTensor<B>
fn linear_weight_backward( x: FloatTensor<B>, output_grad: FloatTensor<B>, ) -> FloatTensor<B>
Backward pass for linear, returning the gradient for weight.
Sourcefn linear_bias_backward(output_grad: FloatTensor<B>) -> FloatTensor<B>
fn linear_bias_backward(output_grad: FloatTensor<B>) -> FloatTensor<B>
Backward pass for linear, returning the gradient for bias.
Sourcefn conv1d(
x: FloatTensor<B>,
weight: FloatTensor<B>,
bias: Option<FloatTensor<B>>,
options: ConvOptions<1>,
) -> FloatTensor<B>
fn conv1d( x: FloatTensor<B>, weight: FloatTensor<B>, bias: Option<FloatTensor<B>>, options: ConvOptions<1>, ) -> FloatTensor<B>
One dimensional convolution.
§Shapes
x: [batch_size, channels_in, length],
weight: [channels_out, channels_in, kernel_size],
bias: [channels_out],
Sourcefn conv1d_x_backward(
x: FloatTensor<B>,
weight: FloatTensor<B>,
output_grad: FloatTensor<B>,
options: ConvOptions<1>,
) -> FloatTensor<B>
fn conv1d_x_backward( x: FloatTensor<B>, weight: FloatTensor<B>, output_grad: FloatTensor<B>, options: ConvOptions<1>, ) -> FloatTensor<B>
Backward pass for the conv1d operation, returning the gradient for x.
Sourcefn conv1d_weight_backward(
x: FloatTensor<B>,
weight: FloatTensor<B>,
output_grad: FloatTensor<B>,
options: ConvOptions<1>,
) -> FloatTensor<B>
fn conv1d_weight_backward( x: FloatTensor<B>, weight: FloatTensor<B>, output_grad: FloatTensor<B>, options: ConvOptions<1>, ) -> FloatTensor<B>
Backward pass for the conv1d operation, returning the gradient for weight.
Sourcefn conv1d_bias_backward(
x: FloatTensor<B>,
bias: FloatTensor<B>,
output_grad: FloatTensor<B>,
) -> FloatTensor<B>
fn conv1d_bias_backward( x: FloatTensor<B>, bias: FloatTensor<B>, output_grad: FloatTensor<B>, ) -> FloatTensor<B>
Backward pass for the conv1d operation, returning the gradient for bias.
Sourcefn conv2d_x_backward(
x: FloatTensor<B>,
weight: FloatTensor<B>,
output_grad: FloatTensor<B>,
options: ConvOptions<2>,
) -> FloatTensor<B>
fn conv2d_x_backward( x: FloatTensor<B>, weight: FloatTensor<B>, output_grad: FloatTensor<B>, options: ConvOptions<2>, ) -> FloatTensor<B>
Backward pass for the conv2d operation, returning the gradient for x.
Sourcefn conv2d_weight_backward(
x: FloatTensor<B>,
weight: FloatTensor<B>,
output_grad: FloatTensor<B>,
options: ConvOptions<2>,
) -> FloatTensor<B>
fn conv2d_weight_backward( x: FloatTensor<B>, weight: FloatTensor<B>, output_grad: FloatTensor<B>, options: ConvOptions<2>, ) -> FloatTensor<B>
Backward pass for the conv2d operation, returning the gradient for weight.
Sourcefn conv2d_bias_backward(
x: FloatTensor<B>,
bias: FloatTensor<B>,
output_grad: FloatTensor<B>,
) -> FloatTensor<B>
fn conv2d_bias_backward( x: FloatTensor<B>, bias: FloatTensor<B>, output_grad: FloatTensor<B>, ) -> FloatTensor<B>
Backward pass for the conv2d operation, returning the gradient for bias.
Sourcefn conv3d_x_backward(
x: FloatTensor<B>,
weight: FloatTensor<B>,
output_grad: FloatTensor<B>,
options: ConvOptions<3>,
) -> FloatTensor<B>
fn conv3d_x_backward( x: FloatTensor<B>, weight: FloatTensor<B>, output_grad: FloatTensor<B>, options: ConvOptions<3>, ) -> FloatTensor<B>
Backward pass for the conv3d operation, returning the gradient for x.
Sourcefn conv3d_weight_backward(
x: FloatTensor<B>,
weight: FloatTensor<B>,
output_grad: FloatTensor<B>,
options: ConvOptions<3>,
) -> FloatTensor<B>
fn conv3d_weight_backward( x: FloatTensor<B>, weight: FloatTensor<B>, output_grad: FloatTensor<B>, options: ConvOptions<3>, ) -> FloatTensor<B>
Backward pass for the conv3d operation, returning the gradient for weight.
Sourcefn conv3d_bias_backward(
x: FloatTensor<B>,
bias: FloatTensor<B>,
output_grad: FloatTensor<B>,
) -> FloatTensor<B>
fn conv3d_bias_backward( x: FloatTensor<B>, bias: FloatTensor<B>, output_grad: FloatTensor<B>, ) -> FloatTensor<B>
Backward pass for the conv3d operation, returning the gradient for bias.
Sourcefn conv_transpose1d(
x: FloatTensor<B>,
weight: FloatTensor<B>,
bias: Option<FloatTensor<B>>,
options: ConvTransposeOptions<1>,
) -> FloatTensor<B>
fn conv_transpose1d( x: FloatTensor<B>, weight: FloatTensor<B>, bias: Option<FloatTensor<B>>, options: ConvTransposeOptions<1>, ) -> FloatTensor<B>
One dimensional transposed convolution.
§Shapes
x: [batch_size, channels_in, length],
weight: [channels_in, channels_out, kernel_size],
bias: [channels_out],
Sourcefn conv_transpose1d_x_backward(
weight: FloatTensor<B>,
output_grad: FloatTensor<B>,
options: ConvTransposeOptions<1>,
) -> FloatTensor<B>
fn conv_transpose1d_x_backward( weight: FloatTensor<B>, output_grad: FloatTensor<B>, options: ConvTransposeOptions<1>, ) -> FloatTensor<B>
Backward pass for the conv transpose 1d operation, returning the gradient for x.
Sourcefn conv_transpose1d_weight_backward(
x: FloatTensor<B>,
weight: FloatTensor<B>,
output_grad: FloatTensor<B>,
options: ConvTransposeOptions<1>,
) -> FloatTensor<B>
fn conv_transpose1d_weight_backward( x: FloatTensor<B>, weight: FloatTensor<B>, output_grad: FloatTensor<B>, options: ConvTransposeOptions<1>, ) -> FloatTensor<B>
Backward pass for the conv transpose 1d operation, returning the gradient for weight.
Sourcefn conv_transpose1d_bias_backward(
x: FloatTensor<B>,
bias: FloatTensor<B>,
output_grad: FloatTensor<B>,
) -> FloatTensor<B>
fn conv_transpose1d_bias_backward( x: FloatTensor<B>, bias: FloatTensor<B>, output_grad: FloatTensor<B>, ) -> FloatTensor<B>
Backward pass for the conv transpose 1d operation, returning the gradient for bias.
Sourcefn conv_transpose2d_x_backward(
weight: FloatTensor<B>,
output_grad: FloatTensor<B>,
options: ConvTransposeOptions<2>,
) -> FloatTensor<B>
fn conv_transpose2d_x_backward( weight: FloatTensor<B>, output_grad: FloatTensor<B>, options: ConvTransposeOptions<2>, ) -> FloatTensor<B>
Backward pass for the conv transpose 2d operation, returning the gradient for x.
Sourcefn conv_transpose2d_weight_backward(
x: FloatTensor<B>,
weight: FloatTensor<B>,
output_grad: FloatTensor<B>,
options: ConvTransposeOptions<2>,
) -> FloatTensor<B>
fn conv_transpose2d_weight_backward( x: FloatTensor<B>, weight: FloatTensor<B>, output_grad: FloatTensor<B>, options: ConvTransposeOptions<2>, ) -> FloatTensor<B>
Backward pass for the conv transpose 2d operation, returning the gradient for weight.
Sourcefn conv_transpose2d_bias_backward(
x: FloatTensor<B>,
bias: FloatTensor<B>,
output_grad: FloatTensor<B>,
) -> FloatTensor<B>
fn conv_transpose2d_bias_backward( x: FloatTensor<B>, bias: FloatTensor<B>, output_grad: FloatTensor<B>, ) -> FloatTensor<B>
Backward pass for the conv transpose 2d operation, returning the gradient for bias.
Sourcefn conv_transpose3d_x_backward(
weight: FloatTensor<B>,
output_grad: FloatTensor<B>,
options: ConvTransposeOptions<3>,
) -> FloatTensor<B>
fn conv_transpose3d_x_backward( weight: FloatTensor<B>, output_grad: FloatTensor<B>, options: ConvTransposeOptions<3>, ) -> FloatTensor<B>
Backward pass for the conv transpose 3d operation, returning the gradient for x.
Sourcefn conv_transpose3d_weight_backward(
x: FloatTensor<B>,
weight: FloatTensor<B>,
output_grad: FloatTensor<B>,
options: ConvTransposeOptions<3>,
) -> FloatTensor<B>
fn conv_transpose3d_weight_backward( x: FloatTensor<B>, weight: FloatTensor<B>, output_grad: FloatTensor<B>, options: ConvTransposeOptions<3>, ) -> FloatTensor<B>
Backward pass for the conv transpose 3d operation, returning the gradient for weight.
Sourcefn conv_transpose3d_bias_backward(
x: FloatTensor<B>,
bias: FloatTensor<B>,
output_grad: FloatTensor<B>,
) -> FloatTensor<B>
fn conv_transpose3d_bias_backward( x: FloatTensor<B>, bias: FloatTensor<B>, output_grad: FloatTensor<B>, ) -> FloatTensor<B>
Backward pass for the conv transpose 3d operation, returning the gradient for bias.
Sourcefn unfold4d(
x: FloatTensor<B>,
kernel_size: [usize; 2],
options: UnfoldOptions,
) -> FloatTensor<B>
fn unfold4d( x: FloatTensor<B>, kernel_size: [usize; 2], options: UnfoldOptions, ) -> FloatTensor<B>
Four-dimensional unfolding.
§Shapes
- x: [batch_size, channels_in, height, width]
- returns: [batch_size, channels_in * kernel_size_1 * kernel_size_2, number of blocks]
Sourcefn avg_pool1d(
x: FloatTensor<B>,
kernel_size: usize,
stride: usize,
padding: usize,
count_include_pad: bool,
ceil_mode: bool,
) -> FloatTensor<B>
fn avg_pool1d( x: FloatTensor<B>, kernel_size: usize, stride: usize, padding: usize, count_include_pad: bool, ceil_mode: bool, ) -> FloatTensor<B>
Sourcefn avg_pool1d_backward(
x: FloatTensor<B>,
grad: FloatTensor<B>,
kernel_size: usize,
stride: usize,
padding: usize,
count_include_pad: bool,
ceil_mode: bool,
) -> FloatTensor<B>
fn avg_pool1d_backward( x: FloatTensor<B>, grad: FloatTensor<B>, kernel_size: usize, stride: usize, padding: usize, count_include_pad: bool, ceil_mode: bool, ) -> FloatTensor<B>
Backward pass for the avg pooling 1d operation.
Sourcefn adaptive_avg_pool1d(x: FloatTensor<B>, output_size: usize) -> FloatTensor<B>
fn adaptive_avg_pool1d(x: FloatTensor<B>, output_size: usize) -> FloatTensor<B>
Sourcefn adaptive_avg_pool1d_backward(
x: FloatTensor<B>,
grad: FloatTensor<B>,
) -> FloatTensor<B>
fn adaptive_avg_pool1d_backward( x: FloatTensor<B>, grad: FloatTensor<B>, ) -> FloatTensor<B>
Backward pass for the adaptive avg pooling 1d operation.
Sourcefn max_pool1d(
x: FloatTensor<B>,
kernel_size: usize,
stride: usize,
padding: usize,
dilation: usize,
ceil_mode: bool,
) -> FloatTensor<B>
fn max_pool1d( x: FloatTensor<B>, kernel_size: usize, stride: usize, padding: usize, dilation: usize, ceil_mode: bool, ) -> FloatTensor<B>
Sourcefn max_pool1d_with_indices(
x: FloatTensor<B>,
kernel_size: usize,
stride: usize,
padding: usize,
dilation: usize,
ceil_mode: bool,
) -> MaxPool1dWithIndices<B>
fn max_pool1d_with_indices( x: FloatTensor<B>, kernel_size: usize, stride: usize, padding: usize, dilation: usize, ceil_mode: bool, ) -> MaxPool1dWithIndices<B>
Sourcefn max_pool1d_with_indices_backward(
x: FloatTensor<B>,
kernel_size: usize,
stride: usize,
padding: usize,
dilation: usize,
ceil_mode: bool,
output_grad: FloatTensor<B>,
indices: IntTensor<B>,
) -> MaxPool1dBackward<B>
fn max_pool1d_with_indices_backward( x: FloatTensor<B>, kernel_size: usize, stride: usize, padding: usize, dilation: usize, ceil_mode: bool, output_grad: FloatTensor<B>, indices: IntTensor<B>, ) -> MaxPool1dBackward<B>
Backward pass for the max pooling 1d operation.
Sourcefn layer_norm(
tensor: FloatTensor<B>,
gamma: FloatTensor<B>,
beta: Option<FloatTensor<B>>,
epsilon: f64,
) -> FloatTensor<B>
fn layer_norm( tensor: FloatTensor<B>, gamma: FloatTensor<B>, beta: Option<FloatTensor<B>>, epsilon: f64, ) -> FloatTensor<B>
Applies Layer Normalization over the last dimension of the input tensor.
Computes (x - mean) / sqrt(var + epsilon) * gamma + beta, where mean and
(biased) var are reduced over the last axis.
§Arguments
- tensor - Input tensor of shape [..., d_model].
- gamma - Scale tensor of shape [d_model].
- beta - Optional bias tensor of shape [d_model].
- epsilon - Numerical stability term added to the variance before the square root.
§Returns
A tensor with the same shape as tensor.
Sourcefn ctc_loss(
log_probs: FloatTensor<B>,
targets: IntTensor<B>,
input_lengths: IntTensor<B>,
target_lengths: IntTensor<B>,
blank: usize,
) -> FloatTensor<B>
fn ctc_loss( log_probs: FloatTensor<B>, targets: IntTensor<B>, input_lengths: IntTensor<B>, target_lengths: IntTensor<B>, blank: usize, ) -> FloatTensor<B>
Computes the Connectionist Temporal Classification (CTC) loss.
Sums over all valid alignments between the input and target sequences using the forward (alpha) algorithm.
§Arguments
- log_probs - Log-probabilities of shape [T, N, C]
- targets - Target label indices of shape [N, S]
- input_lengths - Actual input sequence lengths per batch element [N]
- target_lengths - Actual target lengths per batch element [N]
- blank - Index of the blank label
§Returns
Per-sample loss of shape [N]
Sourcefn has_ctc_loss_backward() -> bool
fn has_ctc_loss_backward() -> bool
Returns true if this backend implements ctc_loss_backward
natively.
Autodiff queries this flag to decide between two paths:
- true: use the backend’s ctc_loss and ctc_loss_backward directly.
- false: call ctc::ctc_loss_default for the forward pass; autodiff then differentiates through the decomposed tensor ops.
Backends that override ctc_loss_backward must also override this to
return true.
Sourcefn ctc_loss_backward(
_log_probs: FloatTensor<B>,
_targets: IntTensor<B>,
_input_lengths: IntTensor<B>,
_target_lengths: IntTensor<B>,
_grad_loss: FloatTensor<B>,
_blank: usize,
) -> FloatTensor<B>
fn ctc_loss_backward( _log_probs: FloatTensor<B>, _targets: IntTensor<B>, _input_lengths: IntTensor<B>, _target_lengths: IntTensor<B>, _grad_loss: FloatTensor<B>, _blank: usize, ) -> FloatTensor<B>
Backward pass for ctc_loss: gradient w.r.t. log_probs.
Only called when has_ctc_loss_backward
returns true. Backends without a native implementation should leave
both methods at their defaults; the gradient is computed automatically by
autodiff against the decomposed ctc::ctc_loss_default forward.
§Arguments
- log_probs - Log-probabilities of shape [T, N, C]
- targets - Target label indices of shape [N, S]
- input_lengths - Actual input sequence lengths per batch element [N]
- target_lengths - Actual target lengths per batch element [N]
- grad_loss - Upstream gradient w.r.t. the per-sample loss [N]
- blank - Index of the blank label
§Returns
Gradient w.r.t. log_probs of shape [T, N, C]
Dyn Compatibility§
This trait is not dyn compatible.
In older versions of Rust, dyn compatibility was called "object safety", so this trait is not object safe.