burn_core/grad_clipping/base.rs

use crate as burn;

use crate::{config::Config, tensor::Tensor};
use burn_tensor::backend::Backend;

/// Configuration for gradient clipping, which provides a way to mitigate
/// exploding gradients by clipping them by value or by norm.
#[derive(Config)]
pub enum GradientClippingConfig {
    /// Clip the gradient by value.
    Value(f32),

    /// Clip the gradient by norm.
    Norm(f32),
}

impl GradientClippingConfig {
    /// Initialize the gradient clipping.
    ///
    /// # Returns
    ///
    /// The [GradientClipping] strategy described by this configuration.
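    ///
    /// # Example
    ///
    /// A minimal sketch of how the configuration is typically used:
    ///
    /// ```ignore
    /// let clipping = GradientClippingConfig::Value(0.5).init();
    /// ```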
    pub fn init(&self) -> GradientClipping {
        match self {
            GradientClippingConfig::Value(val) => GradientClipping::Value(*val),
            GradientClippingConfig::Norm(val) => GradientClipping::Norm(*val),
        }
    }
}

/// Gradient Clipping provides a way to mitigate exploding gradients
/// by clipping every component of the gradient by value or by norm during
/// backpropagation.
#[derive(Clone)]
pub enum GradientClipping {
    /// Clip the gradient by value.
    Value(f32),

    /// Clip the gradient by norm.
    Norm(f32),
}

impl GradientClipping {
    /// Clip the gradient.
    ///
    /// # Arguments
    ///
    /// * `grad` - The gradient to clip.
    ///
    /// # Returns
    ///
    /// The clipped gradient.
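    ///
    /// # Example
    ///
    /// A minimal sketch, reusing the `TestBackend` and `from_floats` pattern
    /// from the tests below:
    ///
    /// ```ignore
    /// let grad: Tensor<TestBackend, 1> = Tensor::from_floats([0.3, 1.7, -2.4], &Default::default());
    /// // Every component is limited to the range [-1.0, 1.0].
    /// let clipped = GradientClipping::Value(1.0).clip_gradient(grad);
    /// ```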
    pub fn clip_gradient<B: Backend, const D: usize>(&self, grad: Tensor<B, D>) -> Tensor<B, D> {
        match self {
            GradientClipping::Value(threshold) => self.clip_by_value(grad, *threshold),
            GradientClipping::Norm(max_norm) => self.clip_by_norm(grad, *max_norm),
        }
    }

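    /// Clamps every component of `grad` to the range `[-threshold, threshold]`
    /// by masking values above `threshold` and below `-threshold`.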
    fn clip_by_value<B: Backend, const D: usize>(
        &self,
        grad: Tensor<B, D>,
        threshold: f32,
    ) -> Tensor<B, D> {
        let greater_mask = grad.clone().greater_elem(threshold);
        let lower_mask = grad.clone().lower_elem(-threshold);

        let clipped_grad = grad.mask_fill(greater_mask, threshold);

        clipped_grad.mask_fill(lower_mask, -threshold)
    }

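    /// Rescales the whole gradient by `threshold / l2_norm(grad)` whenever its
    /// L2 norm exceeds `threshold`; otherwise the gradient is returned unchanged.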
    fn clip_by_norm<B: Backend, const D: usize>(
        &self,
        grad: Tensor<B, D>,
        threshold: f32,
    ) -> Tensor<B, D> {
        use burn_tensor::ElementConversion;

        let norm = Self::l2_norm(grad.clone());
        let norm_float = norm.into_scalar().elem::<f32>();

        if norm_float > threshold {
            let scale = threshold / norm_float;
            grad.mul_scalar(scale)
        } else {
            grad
        }
    }

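    /// Computes the L2 norm of `tensor`: the square root of the sum of the
    /// squares of all its elements, returned as a single-element rank-1 tensor.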
    fn l2_norm<B: Backend, const D: usize>(tensor: Tensor<B, D>) -> Tensor<B, 1> {
        let squared = tensor.powi_scalar(2);
        let sum = squared.sum();
        sum.sqrt()
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::TestBackend;
    use crate::tensor::Tensor;

    #[test]
    fn test_clip_by_value() {
        let gradient: Tensor<TestBackend, 2> = Tensor::from_floats(
            [
                [0.6294, 0.0940, 0.8176, 0.8824, 0.5228, 0.4310],
                [0.7152, 0.9559, 0.7893, 0.5684, 0.5939, 0.8883],
            ],
            &Default::default(),
        );

        // Every component above the 0.5 threshold should be clamped down to 0.5.
        let clipped_gradient = GradientClipping::Value(0.5).clip_gradient(gradient);
        let clipped_gradient_data = clipped_gradient.into_data();

        for value in clipped_gradient_data.iter::<f32>() {
            assert!(value <= 0.5);
        }
    }

    #[test]
    fn test_clip_by_norm() {
        let gradient: Tensor<TestBackend, 2> = Tensor::from_floats(
            [
                [0.6294, 0.0940, 0.8176, 0.8824, 0.5228, 0.4310],
                [0.7152, 0.9559, 0.7893, 0.5684, 0.5939, 0.8883],
            ],
            &Default::default(),
        );

        // The gradient's L2 norm (roughly 2.4) exceeds the 2.2 threshold, so the
        // whole tensor is scaled down and every component should fall below 0.88.
        let clipped_gradient = GradientClipping::Norm(2.2).clip_gradient(gradient);
        let clipped_gradient_data = clipped_gradient.into_data();

        for value in clipped_gradient_data.iter::<f32>() {
            assert!(value <= 0.88);
        }
    }
}