Trait burn_core::tensor::ops::ActivationOps
source · pub trait ActivationOps<B>
where
    B: Backend,
{
// Provided methods
fn leaky_relu<const D: usize>(
tensor: <B as Backend>::FloatTensorPrimitive<D>,
negative_slope: <B as Backend>::FloatElem
) -> <B as Backend>::FloatTensorPrimitive<D> { ... }
fn relu<const D: usize>(
tensor: <B as Backend>::FloatTensorPrimitive<D>
) -> <B as Backend>::FloatTensorPrimitive<D> { ... }
fn relu_backward<const D: usize>(
output: <B as Backend>::FloatTensorPrimitive<D>,
grad: <B as Backend>::FloatTensorPrimitive<D>
) -> <B as Backend>::FloatTensorPrimitive<D> { ... }
fn gelu<const D: usize>(
tensor: <B as Backend>::FloatTensorPrimitive<D>
) -> <B as Backend>::FloatTensorPrimitive<D> { ... }
fn prelu<const D: usize>(
tensor: <B as Backend>::FloatTensorPrimitive<D>,
alpha: <B as Backend>::FloatTensorPrimitive<D>
) -> <B as Backend>::FloatTensorPrimitive<D> { ... }
fn gelu_backward<const D: usize>(
x: <B as Backend>::FloatTensorPrimitive<D>,
grad: <B as Backend>::FloatTensorPrimitive<D>
) -> <B as Backend>::FloatTensorPrimitive<D> { ... }
fn sigmoid<const D: usize>(
tensor: <B as Backend>::FloatTensorPrimitive<D>
) -> <B as Backend>::FloatTensorPrimitive<D> { ... }
fn sigmoid_backward<const D: usize>(
output: <B as Backend>::FloatTensorPrimitive<D>,
grad: <B as Backend>::FloatTensorPrimitive<D>
) -> <B as Backend>::FloatTensorPrimitive<D> { ... }
fn log_sigmoid<const D: usize>(
tensor: <B as Backend>::FloatTensorPrimitive<D>
) -> <B as Backend>::FloatTensorPrimitive<D> { ... }
fn log_sigmoid_backward<const D: usize>(
x: <B as Backend>::FloatTensorPrimitive<D>,
grad: <B as Backend>::FloatTensorPrimitive<D>
) -> <B as Backend>::FloatTensorPrimitive<D> { ... }
}

Expand description
Activation function operations.
This trait lets backend implementations override activation functions for better performance.
Provided Methods§
source
fn leaky_relu<const D: usize>(
tensor: <B as Backend>::FloatTensorPrimitive<D>,
negative_slope: <B as Backend>::FloatElem
) -> <B as Backend>::FloatTensorPrimitive<D>
fn leaky_relu<const D: usize>( tensor: <B as Backend>::FloatTensorPrimitive<D>, negative_slope: <B as Backend>::FloatElem ) -> <B as Backend>::FloatTensorPrimitive<D>
source
fn relu<const D: usize>(
tensor: <B as Backend>::FloatTensorPrimitive<D>
) -> <B as Backend>::FloatTensorPrimitive<D>
fn relu<const D: usize>( tensor: <B as Backend>::FloatTensorPrimitive<D> ) -> <B as Backend>::FloatTensorPrimitive<D>
source
fn relu_backward<const D: usize>(
output: <B as Backend>::FloatTensorPrimitive<D>,
grad: <B as Backend>::FloatTensorPrimitive<D>
) -> <B as Backend>::FloatTensorPrimitive<D>
fn relu_backward<const D: usize>( output: <B as Backend>::FloatTensorPrimitive<D>, grad: <B as Backend>::FloatTensorPrimitive<D> ) -> <B as Backend>::FloatTensorPrimitive<D>
source
fn gelu<const D: usize>(
tensor: <B as Backend>::FloatTensorPrimitive<D>
) -> <B as Backend>::FloatTensorPrimitive<D>
fn gelu<const D: usize>( tensor: <B as Backend>::FloatTensorPrimitive<D> ) -> <B as Backend>::FloatTensorPrimitive<D>
source
fn prelu<const D: usize>(
tensor: <B as Backend>::FloatTensorPrimitive<D>,
alpha: <B as Backend>::FloatTensorPrimitive<D>
) -> <B as Backend>::FloatTensorPrimitive<D>
fn prelu<const D: usize>( tensor: <B as Backend>::FloatTensorPrimitive<D>, alpha: <B as Backend>::FloatTensorPrimitive<D> ) -> <B as Backend>::FloatTensorPrimitive<D>
Applies the PReLU activation function.
§Arguments
tensor - The input tensor
alpha - The weight tensor
source
fn gelu_backward<const D: usize>(
x: <B as Backend>::FloatTensorPrimitive<D>,
grad: <B as Backend>::FloatTensorPrimitive<D>
) -> <B as Backend>::FloatTensorPrimitive<D>
fn gelu_backward<const D: usize>( x: <B as Backend>::FloatTensorPrimitive<D>, grad: <B as Backend>::FloatTensorPrimitive<D> ) -> <B as Backend>::FloatTensorPrimitive<D>
source
fn sigmoid<const D: usize>(
tensor: <B as Backend>::FloatTensorPrimitive<D>
) -> <B as Backend>::FloatTensorPrimitive<D>
fn sigmoid<const D: usize>( tensor: <B as Backend>::FloatTensorPrimitive<D> ) -> <B as Backend>::FloatTensorPrimitive<D>
source
fn sigmoid_backward<const D: usize>(
output: <B as Backend>::FloatTensorPrimitive<D>,
grad: <B as Backend>::FloatTensorPrimitive<D>
) -> <B as Backend>::FloatTensorPrimitive<D>
fn sigmoid_backward<const D: usize>( output: <B as Backend>::FloatTensorPrimitive<D>, grad: <B as Backend>::FloatTensorPrimitive<D> ) -> <B as Backend>::FloatTensorPrimitive<D>
source
fn log_sigmoid<const D: usize>(
tensor: <B as Backend>::FloatTensorPrimitive<D>
) -> <B as Backend>::FloatTensorPrimitive<D>
fn log_sigmoid<const D: usize>( tensor: <B as Backend>::FloatTensorPrimitive<D> ) -> <B as Backend>::FloatTensorPrimitive<D>
source
fn log_sigmoid_backward<const D: usize>(
x: <B as Backend>::FloatTensorPrimitive<D>,
grad: <B as Backend>::FloatTensorPrimitive<D>
) -> <B as Backend>::FloatTensorPrimitive<D>
fn log_sigmoid_backward<const D: usize>( x: <B as Backend>::FloatTensorPrimitive<D>, grad: <B as Backend>::FloatTensorPrimitive<D> ) -> <B as Backend>::FloatTensorPrimitive<D>
Object Safety§
This trait is not object safe.