pub trait ActivationOps<B: Backend> {
    // Provided methods
    fn relu<const D: usize>(
        tensor: B::TensorPrimitive<D>
    ) -> B::TensorPrimitive<D> { ... }
    fn relu_backward<const D: usize>(
        output: B::TensorPrimitive<D>,
        grad: B::TensorPrimitive<D>
    ) -> B::TensorPrimitive<D> { ... }
    fn gelu<const D: usize>(
        tensor: B::TensorPrimitive<D>
    ) -> B::TensorPrimitive<D> { ... }
    fn gelu_backward<const D: usize>(
        x: B::TensorPrimitive<D>,
        grad: B::TensorPrimitive<D>
    ) -> B::TensorPrimitive<D> { ... }
}

Activation function operations.

This trait lets backend implementations override activation functions for better performance.
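
For illustration only, here is a minimal standalone sketch of the provided-method pattern (the `Activation` trait, `ReferenceBackend`, and `FusedBackend` below are hypothetical and not part of this crate): the trait ships a generic default, and a backend with a faster kernel overrides only the methods it cares about.

trait Activation {
    // Provided default: element-wise max(x, 0).
    fn relu(xs: Vec<f32>) -> Vec<f32> {
        xs.into_iter().map(|x| x.max(0.0)).collect()
    }
}

struct ReferenceBackend;
impl Activation for ReferenceBackend {} // keeps the provided default

struct FusedBackend;
impl Activation for FusedBackend {
    // A real backend would dispatch to its own fused or in-place kernel here.
    fn relu(xs: Vec<f32>) -> Vec<f32> {
        xs.into_iter().map(|x| if x > 0.0 { x } else { 0.0 }).collect()
    }
}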

Provided Methods

fn relu<const D: usize>(tensor: B::TensorPrimitive<D>) -> B::TensorPrimitive<D>

Applies the ReLU activation function.

Arguments
  • tensor - The input tensor.
Returns

The output tensor with ReLU applied element-wise.
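
As a reference for the semantics only (a sketch on plain f32 slices, not the backend primitive this method actually operates on), ReLU maps each element x to max(x, 0):

fn relu_ref(xs: &[f32]) -> Vec<f32> {
    // relu(x) = max(x, 0), applied element-wise.
    xs.iter().map(|&x| x.max(0.0)).collect()
}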

fn relu_backward<const D: usize>(output: B::TensorPrimitive<D>, grad: B::TensorPrimitive<D>) -> B::TensorPrimitive<D>

Computes the backward pass of the ReLU activation function.

Arguments
  • output - The output tensor of the forward pass.
  • grad - The gradient with respect to the output.
Returns

The gradient with respect to the input.
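
A sketch of the underlying formula on plain f32 slices (assumed semantics, not the actual primitive implementation): since d/dx relu(x) is 1 where x > 0 and 0 elsewhere, the forward output can serve as the mask for the incoming gradient:

fn relu_backward_ref(output: &[f32], grad: &[f32]) -> Vec<f32> {
    // grad_input[i] = grad[i] if output[i] > 0, else 0.
    output
        .iter()
        .zip(grad)
        .map(|(&o, &g)| if o > 0.0 { g } else { 0.0 })
        .collect()
}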

fn gelu<const D: usize>(tensor: B::TensorPrimitive<D>) -> B::TensorPrimitive<D>

Applies the Gelu activation function.

Arguments
  • tensor - The input tensor.
Returns

The output tensor with the Gelu function applied element-wise.
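
For reference, Gelu is defined as gelu(x) = x * Phi(x), where Phi is the standard normal CDF. The sketch below (plain f32 slices, using the common tanh approximation because std Rust has no erf) only illustrates the math, not the backend implementation:

fn gelu_ref(xs: &[f32]) -> Vec<f32> {
    // gelu(x) ~= 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3)))
    const SQRT_2_OVER_PI: f32 = 0.797_884_56;
    xs.iter()
        .map(|&x| 0.5 * x * (1.0 + (SQRT_2_OVER_PI * (x + 0.044_715 * x * x * x)).tanh()))
        .collect()
}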

fn gelu_backward<const D: usize>(x: B::TensorPrimitive<D>, grad: B::TensorPrimitive<D>) -> B::TensorPrimitive<D>

Computes the backward pass of the Gelu activation function.

Arguments
  • x - The input tensor of the forward pass.
  • grad - The gradient with respect to the output.
Returns

The gradient with respect to the input.
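
A sketch of the derivative on plain f32 slices (assumed semantics only, not the backend implementation): with gelu(x) = x * Phi(x), the derivative is Phi(x) + x * phi(x), where phi is the standard normal PDF; Phi is approximated with the same tanh form as above:

fn gelu_backward_ref(xs: &[f32], grad: &[f32]) -> Vec<f32> {
    const SQRT_2_OVER_PI: f32 = 0.797_884_56;
    const INV_SQRT_2PI: f32 = 0.398_942_28;
    xs.iter()
        .zip(grad)
        .map(|(&x, &g)| {
            // Standard normal CDF via the tanh approximation.
            let cdf = 0.5 * (1.0 + (SQRT_2_OVER_PI * (x + 0.044_715 * x * x * x)).tanh());
            // Standard normal PDF.
            let pdf = INV_SQRT_2PI * (-0.5 * x * x).exp();
            g * (cdf + x * pdf)
        })
        .collect()
}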

Implementors