pub trait ActivationOps<B: Backend> {
    // Provided methods
    fn leaky_relu(
        tensor: FloatTensor<B>,
        negative_slope: FloatElem<B>,
    ) -> FloatTensor<B> { ... }
    fn relu(tensor: FloatTensor<B>) -> FloatTensor<B> { ... }
    fn relu_backward(
        output: FloatTensor<B>,
        grad: FloatTensor<B>,
    ) -> FloatTensor<B> { ... }
    fn gelu(tensor: FloatTensor<B>) -> FloatTensor<B> { ... }
    fn prelu(tensor: FloatTensor<B>, alpha: FloatTensor<B>) -> FloatTensor<B> { ... }
    fn gelu_backward(x: FloatTensor<B>, grad: FloatTensor<B>) -> FloatTensor<B> { ... }
    fn sigmoid(tensor: FloatTensor<B>) -> FloatTensor<B> { ... }
    fn sigmoid_backward(
        output: FloatTensor<B>,
        grad: FloatTensor<B>,
    ) -> FloatTensor<B> { ... }
    fn hard_sigmoid(
        tensor: FloatTensor<B>,
        alpha: FloatElem<B>,
        beta: FloatElem<B>,
    ) -> FloatTensor<B> { ... }
    fn log_sigmoid(tensor: FloatTensor<B>) -> FloatTensor<B> { ... }
    fn log_sigmoid_backward(
        x: FloatTensor<B>,
        grad: FloatTensor<B>,
    ) -> FloatTensor<B> { ... }
}
Activation function operations.

This trait lets backend implementations override activation functions for better performance.
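As a minimal sketch of what such an override looks like, assuming a backend type MyBackend that already implements Backend and the other required ops traits (MyBackend and fused_relu are illustrative names, not part of Burn):

use burn_tensor::ops::{ActivationOps, FloatTensor};

impl ActivationOps<Self> for MyBackend {
    // Override only `relu` with a backend-specific fused kernel
    // (hypothetical); every other method keeps its provided default,
    // which is composed from primitive tensor ops.
    fn relu(tensor: FloatTensor<Self>) -> FloatTensor<Self> {
        fused_relu(tensor)
    }
}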
Provided Methods
fn leaky_relu(
    tensor: FloatTensor<B>,
    negative_slope: FloatElem<B>,
) -> FloatTensor<B>
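The standard LeakyReLU, written with max/min to show the role of negative_slope: $\mathrm{LeakyReLU}(x) = \max(0, x) + \text{negative\_slope} \cdot \min(0, x)$.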
fn relu(tensor: FloatTensor<B>) -> FloatTensor<B>
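The standard definition: $\mathrm{ReLU}(x) = \max(0, x)$.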
fn relu_backward(output: FloatTensor<B>, grad: FloatTensor<B>) -> FloatTensor<B>
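Assuming output is the result of the forward relu call (as the parameter name suggests), the standard ReLU gradient is $\partial L/\partial x = \text{grad} \cdot \mathbb{1}[\text{output} > 0]$.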
fn gelu(tensor: FloatTensor<B>) -> FloatTensor<B>
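The exact (erf-based) GELU, where $\Phi$ is the standard normal CDF: $\mathrm{GELU}(x) = x\,\Phi(x) = \tfrac{x}{2}\bigl(1 + \operatorname{erf}(x/\sqrt{2})\bigr)$; some implementations substitute the faster tanh approximation.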
fn prelu(tensor: FloatTensor<B>, alpha: FloatTensor<B>) -> FloatTensor<B>

Applies the PReLU activation function.

Arguments

- tensor - The input tensor
- alpha - The weight tensor
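PReLU is LeakyReLU with a learned slope, with alpha broadcast against the input (commonly one value per channel): $\mathrm{PReLU}(x) = \max(0, x) + \alpha \cdot \min(0, x)$.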
fn gelu_backward(x: FloatTensor<B>, grad: FloatTensor<B>) -> FloatTensor<B>
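Differentiating $x\,\Phi(x)$ gives the standard GELU gradient, where $\varphi(x) = e^{-x^2/2}/\sqrt{2\pi}$ is the standard normal density: $\partial L/\partial x = \text{grad} \cdot \bigl(\Phi(x) + x\,\varphi(x)\bigr)$.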
fn sigmoid(tensor: FloatTensor<B>) -> FloatTensor<B>
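The standard logistic sigmoid: $\sigma(x) = 1/(1 + e^{-x})$.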
fn sigmoid_backward(
    output: FloatTensor<B>,
    grad: FloatTensor<B>,
) -> FloatTensor<B>
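Since $\sigma'(x) = \sigma(x)(1 - \sigma(x))$, the backward pass needs only the forward output: $\partial L/\partial x = \text{grad} \cdot \text{output} \cdot (1 - \text{output})$.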
fn hard_sigmoid(
    tensor: FloatTensor<B>,
    alpha: FloatElem<B>,
    beta: FloatElem<B>,
) -> FloatTensor<B>
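The signature alone does not pin down the formula, but the usual parameterized hard sigmoid, which these alpha and beta arguments presumably feed, is the piecewise-linear $\mathrm{HardSigmoid}(x) = \operatorname{clamp}(\alpha x + \beta,\ 0,\ 1)$; PyTorch's hardsigmoid corresponds to $\alpha = 1/6$, $\beta = 1/2$.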
fn log_sigmoid(tensor: FloatTensor<B>) -> FloatTensor<B>
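This is typically a separate op because computing $\log(\sigma(x))$ naively underflows for large negative x; the standard stable identity is $\log \sigma(x) = -\log(1 + e^{-x}) = -\operatorname{softplus}(-x)$.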
fn log_sigmoid_backward(
    x: FloatTensor<B>,
    grad: FloatTensor<B>,
) -> FloatTensor<B>
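Since $\frac{d}{dx}\log\sigma(x) = 1 - \sigma(x) = \sigma(-x)$, the gradient is $\partial L/\partial x = \text{grad} \cdot \sigma(-x)$.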
Dyn Compatibility
This trait is not dyn compatible.
In older versions of Rust, dyn compatibility was called "object safety", so this trait is not object safe.