pub trait ActivationOps<B>
where
    B: Backend,
{
    // Provided methods
    fn leaky_relu(
        tensor: <B as BackendTypes>::FloatTensorPrimitive,
        negative_slope: Scalar,
    ) -> <B as BackendTypes>::FloatTensorPrimitive { ... }
    fn relu(
        tensor: <B as BackendTypes>::FloatTensorPrimitive,
    ) -> <B as BackendTypes>::FloatTensorPrimitive { ... }
    fn relu_backward(
        output: <B as BackendTypes>::FloatTensorPrimitive,
        grad: <B as BackendTypes>::FloatTensorPrimitive,
    ) -> <B as BackendTypes>::FloatTensorPrimitive { ... }
    fn gelu(
        tensor: <B as BackendTypes>::FloatTensorPrimitive,
    ) -> <B as BackendTypes>::FloatTensorPrimitive { ... }
    fn prelu(
        tensor: <B as BackendTypes>::FloatTensorPrimitive,
        alpha: <B as BackendTypes>::FloatTensorPrimitive,
    ) -> <B as BackendTypes>::FloatTensorPrimitive { ... }
    fn gelu_backward(
        x: <B as BackendTypes>::FloatTensorPrimitive,
        grad: <B as BackendTypes>::FloatTensorPrimitive,
    ) -> <B as BackendTypes>::FloatTensorPrimitive { ... }
    fn sigmoid(
        tensor: <B as BackendTypes>::FloatTensorPrimitive,
    ) -> <B as BackendTypes>::FloatTensorPrimitive { ... }
    fn sigmoid_backward(
        output: <B as BackendTypes>::FloatTensorPrimitive,
        grad: <B as BackendTypes>::FloatTensorPrimitive,
    ) -> <B as BackendTypes>::FloatTensorPrimitive { ... }
    fn hard_sigmoid(
        tensor: <B as BackendTypes>::FloatTensorPrimitive,
        alpha: Scalar,
        beta: Scalar,
    ) -> <B as BackendTypes>::FloatTensorPrimitive { ... }
    fn log_sigmoid(
        tensor: <B as BackendTypes>::FloatTensorPrimitive,
    ) -> <B as BackendTypes>::FloatTensorPrimitive { ... }
    fn softmax(
        tensor: <B as BackendTypes>::FloatTensorPrimitive,
        dim: usize,
    ) -> <B as BackendTypes>::FloatTensorPrimitive { ... }
    fn log_softmax(
        tensor: <B as BackendTypes>::FloatTensorPrimitive,
        dim: usize,
    ) -> <B as BackendTypes>::FloatTensorPrimitive { ... }
    fn softmin(
        tensor: <B as BackendTypes>::FloatTensorPrimitive,
        dim: usize,
    ) -> <B as BackendTypes>::FloatTensorPrimitive { ... }
    fn log_sigmoid_backward(
        x: <B as BackendTypes>::FloatTensorPrimitive,
        grad: <B as BackendTypes>::FloatTensorPrimitive,
    ) -> <B as BackendTypes>::FloatTensorPrimitive { ... }
}
Activation function operations.
This trait lets backend implementations override activation functions for better performance.
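As a minimal standalone sketch of the pattern this trait enables (the Activations trait, FastBackend type, and Vec<f32> representation below are illustrative stand-ins, not the burn API): defaults are provided generically, and a backend overrides a method to dispatch to a faster kernel.

// Illustrative stand-in for the provided-method pattern.
trait Activations {
    fn relu(x: Vec<f32>) -> Vec<f32> {
        // Generic default built from a primitive elementwise max.
        x.into_iter().map(|v| v.max(0.0)).collect()
    }
}

struct FastBackend;

impl Activations for FastBackend {
    // Override only `relu`; other methods would keep their defaults.
    fn relu(x: Vec<f32>) -> Vec<f32> {
        // A real backend would dispatch to a fused kernel here.
        x.into_iter().map(|v| if v > 0.0 { v } else { 0.0 }).collect()
    }
}

fn main() {
    assert_eq!(<FastBackend as Activations>::relu(vec![-1.0, 2.0]), vec![0.0, 2.0]);
}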
Provided Methods§
fn leaky_relu(
    tensor: <B as BackendTypes>::FloatTensorPrimitive,
    negative_slope: Scalar,
) -> <B as BackendTypes>::FloatTensorPrimitive
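§Example
A scalar reference for the semantics (not the backend kernel): leaky ReLU passes positive inputs through and scales negative ones by negative_slope.

// Leaky ReLU reference: f(x) = x if x >= 0, else negative_slope * x.
fn leaky_relu_ref(x: f32, negative_slope: f32) -> f32 {
    if x >= 0.0 { x } else { negative_slope * x }
}

fn main() {
    assert!((leaky_relu_ref(-2.0, 0.01) + 0.02).abs() < 1e-7);
    assert_eq!(leaky_relu_ref(3.0, 0.01), 3.0);
}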
fn relu(
    tensor: <B as BackendTypes>::FloatTensorPrimitive,
) -> <B as BackendTypes>::FloatTensorPrimitive
fn relu_backward(
    output: <B as BackendTypes>::FloatTensorPrimitive,
    grad: <B as BackendTypes>::FloatTensorPrimitive,
) -> <B as BackendTypes>::FloatTensorPrimitive
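§Example
A scalar sketch of the ReLU gradient. Since the signature takes the forward output, the sketch assumes the usual convention of masking the upstream grad by where that output was positive.

// ReLU backward: pass grad through where the forward output was positive.
fn relu_backward_ref(output: f32, grad: f32) -> f32 {
    if output > 0.0 { grad } else { 0.0 }
}

fn main() {
    assert_eq!(relu_backward_ref(2.0, 0.5), 0.5);
    assert_eq!(relu_backward_ref(0.0, 0.5), 0.0);
}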
fn gelu(
    tensor: <B as BackendTypes>::FloatTensorPrimitive,
) -> <B as BackendTypes>::FloatTensorPrimitive
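§Example
A runnable scalar sketch using the common tanh approximation of GELU; the exact erf-based definition differs slightly, and this is a reference for the math rather than the backend kernel.

// GELU, tanh approximation:
// 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3)))
fn gelu_tanh_ref(x: f32) -> f32 {
    let c = (2.0_f32 / std::f32::consts::PI).sqrt();
    0.5 * x * (1.0 + (c * (x + 0.044715 * x * x * x)).tanh())
}

fn main() {
    // GELU is ~0 for large negative inputs and ~x for large positive ones.
    assert!(gelu_tanh_ref(-10.0).abs() < 1e-4);
    assert!((gelu_tanh_ref(10.0) - 10.0).abs() < 1e-4);
}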
fn prelu(
    tensor: <B as BackendTypes>::FloatTensorPrimitive,
    alpha: <B as BackendTypes>::FloatTensorPrimitive,
) -> <B as BackendTypes>::FloatTensorPrimitive
Applies the PReLU activation function.
§Arguments
tensor - The input tensor.
alpha - The weight tensor.
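§Example
A scalar sketch of the PReLU rule with a learnable alpha; broadcasting of the alpha tensor across the input is omitted here for brevity.

// PReLU: f(x) = x if x >= 0, else alpha * x, with alpha learned.
fn prelu_ref(x: f32, alpha: f32) -> f32 {
    if x >= 0.0 { x } else { alpha * x }
}

fn main() {
    assert_eq!(prelu_ref(-4.0, 0.25), -1.0);
    assert_eq!(prelu_ref(4.0, 0.25), 4.0);
}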
fn gelu_backward(
    x: <B as BackendTypes>::FloatTensorPrimitive,
    grad: <B as BackendTypes>::FloatTensorPrimitive,
) -> <B as BackendTypes>::FloatTensorPrimitive
fn sigmoid(
    tensor: <B as BackendTypes>::FloatTensorPrimitive,
) -> <B as BackendTypes>::FloatTensorPrimitive
fn sigmoid_backward(
    output: <B as BackendTypes>::FloatTensorPrimitive,
    grad: <B as BackendTypes>::FloatTensorPrimitive,
) -> <B as BackendTypes>::FloatTensorPrimitive
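§Example
A scalar sketch of the sigmoid gradient, using the identity that the derivative can be expressed with the forward output alone: sigma'(x) = output * (1 - output).

// Sigmoid backward: grad * output * (1 - output), where `output`
// is the result of the forward sigmoid.
fn sigmoid_backward_ref(output: f32, grad: f32) -> f32 {
    grad * output * (1.0 - output)
}

fn main() {
    // At output = 0.5 the sigmoid slope is at its maximum, 0.25.
    assert_eq!(sigmoid_backward_ref(0.5, 1.0), 0.25);
}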
fn hard_sigmoid(
    tensor: <B as BackendTypes>::FloatTensorPrimitive,
    alpha: Scalar,
    beta: Scalar,
) -> <B as BackendTypes>::FloatTensorPrimitive
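§Example
A scalar sketch assuming the usual piecewise-linear definition, clamp(alpha * x + beta, 0, 1); the concrete alpha/beta values belong to the caller.

// Hard sigmoid: clamp(alpha * x + beta, 0.0, 1.0).
fn hard_sigmoid_ref(x: f32, alpha: f32, beta: f32) -> f32 {
    (alpha * x + beta).clamp(0.0, 1.0)
}

fn main() {
    // With the common alpha = 0.2, beta = 0.5 parameterization:
    assert_eq!(hard_sigmoid_ref(0.0, 0.2, 0.5), 0.5);
    assert_eq!(hard_sigmoid_ref(10.0, 0.2, 0.5), 1.0);
    assert_eq!(hard_sigmoid_ref(-10.0, 0.2, 0.5), 0.0);
}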
fn log_sigmoid(
    tensor: <B as BackendTypes>::FloatTensorPrimitive,
) -> <B as BackendTypes>::FloatTensorPrimitive
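§Example
A numerically stable scalar sketch: computing ln(sigmoid(x)) directly underflows for very negative x, so the standard rewrite min(x, 0) - ln(1 + exp(-|x|)) is used.

// Numerically stable log-sigmoid: min(x, 0) - ln(1 + exp(-|x|)).
fn log_sigmoid_ref(x: f32) -> f32 {
    x.min(0.0) - (-x.abs()).exp().ln_1p()
}

fn main() {
    // Naive ln(sigmoid(-100)) would hit ln(0); the stable form stays finite.
    assert!(log_sigmoid_ref(-100.0).is_finite());
    assert!((log_sigmoid_ref(0.0) - (0.5_f32).ln()).abs() < 1e-6);
}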
fn softmax(
    tensor: <B as BackendTypes>::FloatTensorPrimitive,
    dim: usize,
) -> <B as BackendTypes>::FloatTensorPrimitive
Applies the softmax function along the given dimension.
Uses the max-shift trick for numerical stability: the per-row max is detached
so no gradient flows back through it (the shift is a numerical-stability
transformation, not part of the function).
§Arguments
tensor - The tensor.
dim - The dimension along which softmax is computed.
§Returns
The output tensor.
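§Example
A runnable sketch of the max-shift computation described above, on a single row of f32 values; it is a reference for the math, not the tensor kernel.

// Numerically stable softmax over one row: subtract the row max before
// exponentiating so exp never overflows, then normalize.
fn softmax_row(row: &[f32]) -> Vec<f32> {
    let max = row.iter().copied().fold(f32::NEG_INFINITY, f32::max);
    let exps: Vec<f32> = row.iter().map(|&v| (v - max).exp()).collect();
    let sum: f32 = exps.iter().sum();
    exps.into_iter().map(|e| e / sum).collect()
}

fn main() {
    let out = softmax_row(&[1000.0, 1000.0]); // naive exp(1000.0) would overflow
    assert!((out[0] - 0.5).abs() < 1e-6);
}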
fn log_softmax(
    tensor: <B as BackendTypes>::FloatTensorPrimitive,
    dim: usize,
) -> <B as BackendTypes>::FloatTensorPrimitive
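§Example
A single-row sketch of the fused form, x - max - ln(sum(exp(x - max))), which avoids computing ln(softmax(x)) in two lossy steps.

// Stable log-softmax over one row: shift by the max, then subtract the
// log of the normalizer.
fn log_softmax_row(row: &[f32]) -> Vec<f32> {
    let max = row.iter().copied().fold(f32::NEG_INFINITY, f32::max);
    let log_sum: f32 = row.iter().map(|&v| (v - max).exp()).sum::<f32>().ln();
    row.iter().map(|&v| v - max - log_sum).collect()
}

fn main() {
    let out = log_softmax_row(&[0.0, 0.0]);
    assert!((out[0] - (0.5_f32).ln()).abs() < 1e-6); // ln(1/2)
}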
fn softmin(
    tensor: <B as BackendTypes>::FloatTensorPrimitive,
    dim: usize,
) -> <B as BackendTypes>::FloatTensorPrimitive
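§Example
Softmin is softmax applied to the negated input, so the smallest value receives the largest weight; a single-row sketch reusing that identity.

// Softmin over one row: softmax of the negated values.
fn softmin_row(row: &[f32]) -> Vec<f32> {
    let neg: Vec<f32> = row.iter().map(|&v| -v).collect();
    let max = neg.iter().copied().fold(f32::NEG_INFINITY, f32::max);
    let exps: Vec<f32> = neg.iter().map(|&v| (v - max).exp()).collect();
    let sum: f32 = exps.iter().sum();
    exps.into_iter().map(|e| e / sum).collect()
}

fn main() {
    let out = softmin_row(&[0.0, 10.0]);
    assert!(out[0] > 0.99); // the smaller input dominates
}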
fn log_sigmoid_backward(
    x: <B as BackendTypes>::FloatTensorPrimitive,
    grad: <B as BackendTypes>::FloatTensorPrimitive,
) -> <B as BackendTypes>::FloatTensorPrimitive
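§Example
A scalar sketch of the log-sigmoid gradient: d/dx ln(sigmoid(x)) = 1 - sigmoid(x) = sigmoid(-x), so the backward pass scales grad by sigmoid(-x).

// Log-sigmoid backward: grad * sigmoid(-x), since
// d/dx ln(sigmoid(x)) = 1 - sigmoid(x) = sigmoid(-x).
fn log_sigmoid_backward_ref(x: f32, grad: f32) -> f32 {
    let sig_neg = 1.0 / (1.0 + x.exp()); // sigmoid(-x)
    grad * sig_neg
}

fn main() {
    // At x = 0 the slope of ln(sigmoid) is 0.5.
    assert!((log_sigmoid_backward_ref(0.0, 1.0) - 0.5).abs() < 1e-6);
}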
Dyn Compatibility§
This trait is not dyn compatible.
In older versions of Rust, dyn compatibility was called "object safety", so this trait is not object safe.