candle_nn/activation.rs

//! Activation Functions
//!
use candle::{Result, Tensor};

#[derive(Debug, Clone, Copy, PartialEq, serde::Deserialize, serde::Serialize, Default)]
#[serde(rename_all = "lowercase")]
pub enum Activation {
    #[default]
    #[serde(alias = "gelu")]
    Gelu,
    #[serde(alias = "gelu_new")]
    NewGelu,
    Relu,
    Relu2,
    Relu6,
    Silu,
    Sigmoid,
    HardSigmoid,
    Swiglu,
    Swish,
    HardSwish,
    Elu(f64),
    LeakyRelu(f64),
    #[serde(alias = "gelu_pytorch_tanh")]
    GeluPytorchTanh,
}

impl super::Module for Activation {
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        match self {
            Self::Gelu => xs.gelu_erf(),
            // https://github.com/huggingface/transformers/blob/12f043eaeaabfef6f6efea411d98e6f6d3c094b7/src/transformers/activations.py#L49-L78
            Self::NewGelu => xs.gelu(),
            Self::Relu => xs.relu(),
            Self::Relu2 => xs.relu()?.sqr(),
            Self::Relu6 => xs.clamp(0f32, 6f32),
            Self::Silu => xs.silu(),
            Self::Sigmoid => crate::ops::sigmoid(xs),
            Self::HardSigmoid => crate::ops::hard_sigmoid(xs),
            Self::Swiglu => crate::ops::swiglu(xs),
            Self::Swish => xs * crate::ops::sigmoid(xs)?,
            Self::HardSwish => xs * crate::ops::hard_sigmoid(xs)?,
            &Self::Elu(alpha) => xs.elu(alpha),
            &Self::LeakyRelu(negative_slope) => crate::ops::leaky_relu(xs, negative_slope),
            Self::GeluPytorchTanh => xs.gelu(),
        }
    }
}

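// Illustrative usage sketch (added for this write-up, not part of the upstream
// file): `Activation` implements `Module`, so it can be applied to a tensor like
// any other layer. The device and the input values below are arbitrary examples.
#[cfg(test)]
mod activation_example {
    use super::Activation;
    use candle::{Device, Module, Result, Tensor};

    #[test]
    fn relu_forward() -> Result<()> {
        let xs = Tensor::new(&[-1f32, 0., 2.], &Device::Cpu)?;
        // Relu clamps negative values to zero and keeps positive values unchanged.
        let ys = Activation::Relu.forward(&xs)?;
        assert_eq!(ys.to_vec1::<f32>()?, vec![0., 0., 2.]);
        Ok(())
    }
}
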
#[derive(Clone, Debug)]
pub struct PReLU {
    weight: Tensor,
    is_scalar: bool,
}

impl PReLU {
    pub fn new(weight: Tensor, is_scalar: bool) -> Self {
        Self { weight, is_scalar }
    }

    pub fn weight(&self) -> &Tensor {
        &self.weight
    }

    pub fn is_scalar(&self) -> bool {
        self.is_scalar
    }
}

impl candle::Module for PReLU {
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let weight = if self.is_scalar {
            self.weight.reshape(())?
        } else if xs.shape() == self.weight.shape() {
            self.weight.clone()
        } else if xs.rank() >= 2 {
            let num_channels = xs.dim(1)?;
            let num_weights = self.weight.elem_count();
            if num_weights != num_channels {
                candle::bail!("error in prelu: unexpected number of channels for the input, got {num_channels}, weight dim is {num_weights}")
            }
            // Reshape the per-channel weight to [1, C, 1, ..., 1] so it
            // broadcasts over the channel dimension (dim 1).
            let mut s = vec![1; xs.rank()];
            s[1] = num_weights;
            self.weight.reshape(s)?
        } else {
            self.weight.clone()
        };
        let zeros = xs.zeros_like()?;
        // PReLU(x) = max(0, x) + weight * min(0, x)
        xs.maximum(&zeros)? + xs.minimum(&zeros)?.broadcast_mul(&weight)?
    }
}

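// Illustrative sketch (added for this write-up, not part of the upstream file):
// PReLU computes `max(0, x) + weight * min(0, x)`, i.e. the identity for positive
// inputs and a learned slope for negative inputs. The scalar weight 0.25 matches
// the default used by `prelu` below; the input values are arbitrary examples.
#[cfg(test)]
mod prelu_example {
    use super::PReLU;
    use candle::{Device, Module, Result, Tensor};

    #[test]
    fn scalar_prelu_forward() -> Result<()> {
        // The PyTorch-style encoding of a scalar weight is a 1d tensor of length 1.
        let weight = Tensor::new(&[0.25f32], &Device::Cpu)?;
        let prelu = PReLU::new(weight, true);
        let xs = Tensor::new(&[-4f32, 0., 4.], &Device::Cpu)?;
        // Negative inputs are scaled by the weight, positive inputs pass through.
        let ys = prelu.forward(&xs)?;
        assert_eq!(ys.to_vec1::<f32>()?, vec![-1., 0., 4.]);
        Ok(())
    }
}
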
/// Create or initialize a new PReLU layer.
///
/// The weight is created under the default name `"weight"`.
///
/// # Arguments
///
/// * `num_channels` - The number of channels. Use `None` for a single trainable scalar value and
///   `Some` for a 1D vector with the requested number of channels. When applying the `forward`
///   function, the input tensor shape `s` should either be one-dimensional with this number of
///   channels or, if `s.len() >= 2`, have `s[1]` equal to this number.
pub fn prelu(num_channels: Option<usize>, vs: crate::VarBuilder) -> Result<PReLU> {
    let init_ws = crate::init::Init::Const(0.25);
    // When using a scalar weight, the PyTorch encoding is to use a 1d vector of length 1.
    let ws = vs.get_with_hints((num_channels.unwrap_or(1),), "weight", init_ws)?;
    Ok(PReLU::new(ws, num_channels.is_none()))
}
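
// Construction sketch (added for this write-up, not part of the upstream file):
// building a PReLU layer through a `VarBuilder`. `VarBuilder::zeros` is assumed
// here only so the example is self-contained; in practice the builder would
// typically come from a `VarMap` or a loaded checkpoint.
#[cfg(test)]
mod prelu_builder_example {
    use candle::{DType, Device, Result};

    #[test]
    fn build_scalar_prelu() -> Result<()> {
        let vb = crate::VarBuilder::zeros(DType::F32, &Device::Cpu);
        // `None` requests a single shared (scalar) weight; `Some(c)` would allocate
        // one weight per channel instead.
        let layer = super::prelu(None, vb)?;
        assert!(layer.is_scalar());
        assert_eq!(layer.weight().elem_count(), 1);
        Ok(())
    }
}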