burn_optim/grad_clipping/
base.rs

use burn_core as burn;

use burn::tensor::backend::Backend;
use burn::{config::Config, tensor::Tensor};

/// Configuration for gradient clipping, either by value or by L2 norm.
#[derive(Config, Debug)]
pub enum GradientClippingConfig {
    /// Clip each gradient component at the given value.
    Value(f32),

    /// Scale the gradient so that its L2 norm does not exceed the given value.
    Norm(f32),
}

impl GradientClippingConfig {
    /// Initialize the configured [GradientClipping] strategy.
    pub fn init(&self) -> GradientClipping {
        match self {
            GradientClippingConfig::Value(val) => GradientClipping::Value(*val),
            GradientClippingConfig::Norm(val) => GradientClipping::Norm(*val),
        }
    }
}
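
// A minimal usage sketch, assuming `grad` is some `Tensor<B, D>` obtained
// during backpropagation (everything else is the API defined in this file):
//
//     let clipping = GradientClippingConfig::Norm(1.0).init();
//     let clipped = clipping.clip_gradient(grad);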

/// Gradient clipping strategy, applied to every gradient component (by value)
/// or to the gradient as a whole (by L2 norm) during backpropagation.
#[derive(Clone)]
pub enum GradientClipping {
    /// Clip each gradient component at the given value.
    Value(f32),

    /// Scale the gradient so that its L2 norm does not exceed the given value.
    Norm(f32),
}

impl GradientClipping {
    /// Clip the given gradient according to the configured strategy.
    pub fn clip_gradient<B: Backend, const D: usize>(&self, grad: Tensor<B, D>) -> Tensor<B, D> {
        match self {
            GradientClipping::Value(threshold) => self.clip_by_value(grad, *threshold),
            GradientClipping::Norm(max_norm) => self.clip_by_norm(grad, *max_norm),
        }
    }

    /// Clamp every component of `grad` into `[-threshold, threshold]`.
    fn clip_by_value<B: Backend, const D: usize>(
        &self,
        grad: Tensor<B, D>,
        threshold: f32,
    ) -> Tensor<B, D> {
        // Mark the components that exceed the threshold in either direction...
        let greater_mask = grad.clone().greater_elem(threshold);
        let lower_mask = grad.clone().lower_elem(-threshold);

        // ...and overwrite them with the corresponding bound.
        let clipped_grad = grad.mask_fill(greater_mask, threshold);

        clipped_grad.mask_fill(lower_mask, -threshold)
    }

    /// Rescale `grad` so that its L2 norm is at most `threshold`.
    fn clip_by_norm<B: Backend, const D: usize>(
        &self,
        grad: Tensor<B, D>,
        threshold: f32,
    ) -> Tensor<B, D> {
        let norm = Self::l2_norm(grad.clone());
        // The small epsilon guards against division by zero when the gradient
        // is all zeros.
        let clip_coef = norm.add_scalar(1e-6).recip().mul_scalar(threshold);
        // Clamping the coefficient at 1.0 leaves gradients whose norm is
        // already within the threshold untouched.
        let clip_coef_clamped = clip_coef.clamp_max(1.0);
        grad.mul(clip_coef_clamped.unsqueeze())
    }

    fn l2_norm<B: Backend, const D: usize>(tensor: Tensor<B, D>) -> Tensor<B, 1> {
        let squared = tensor.powi_scalar(2);
        let sum = squared.sum();
        sum.sqrt()
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::TestBackend;
    use burn::tensor::Tensor;

    #[test]
    fn test_clip_by_value() {
        let gradient: Tensor<TestBackend, 2> = Tensor::from_floats(
            [
                [0.6294, 0.0940, 0.8176, 0.8824, 0.5228, 0.4310],
                [0.7152, 0.9559, 0.7893, 0.5684, 0.5939, 0.8883],
            ],
            &Default::default(),
        );

        let clipped_gradient = GradientClipping::Value(0.5).clip_gradient(gradient);
        let clipped_gradient_data = clipped_gradient.into_data();

        for value in clipped_gradient_data.iter::<f32>() {
            assert!(value <= 0.5);
        }
    }
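
    /// Added sketch: exercises the negative side of by-value clipping, which
    /// the all-positive fixture above does not cover.
    #[test]
    fn test_clip_by_value_negative() {
        let gradient: Tensor<TestBackend, 1> =
            Tensor::from_floats([0.8, -0.7, 0.3, -0.2], &Default::default());

        let clipped_gradient = GradientClipping::Value(0.5).clip_gradient(gradient);

        for value in clipped_gradient.into_data().iter::<f32>() {
            assert!(value.abs() <= 0.5);
        }
    }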

    #[test]
    fn test_clip_by_norm() {
        let gradient: Tensor<TestBackend, 2> = Tensor::from_floats(
            [
                [0.6294, 0.0940, 0.8176, 0.8824, 0.5228, 0.4310],
                [0.7152, 0.9559, 0.7893, 0.5684, 0.5939, 0.8883],
            ],
            &Default::default(),
        );

        let clipped_gradient = GradientClipping::Norm(2.2).clip_gradient(gradient);
        let clipped_gradient_data = clipped_gradient.into_data();

        for value in clipped_gradient_data.iter::<f32>() {
            assert!(value <= 0.88);
        }
    }
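
    /// Added sketch: checks that by-norm clipping rescales an over-threshold
    /// gradient so its L2 norm lands (approximately) on the threshold.
    #[test]
    fn test_clip_by_norm_rescales_to_threshold() {
        let gradient: Tensor<TestBackend, 2> =
            Tensor::from_floats([[1.0, 2.0, 2.0], [0.0, 4.0, 2.0]], &Default::default());

        // The original L2 norm is sqrt(1 + 4 + 4 + 0 + 16 + 4) = sqrt(29) ≈ 5.385 > 2.0.
        let clipped = GradientClipping::Norm(2.0).clip_gradient(gradient);
        let norm = clipped.powi_scalar(2).sum().sqrt();

        let norm_value = norm.into_data().iter::<f32>().next().unwrap();
        assert!((norm_value - 2.0).abs() < 1e-3);
    }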

    #[test]
    fn test_clip_by_norm_no_clipping() {
        let gradient: Tensor<TestBackend, 2> = Tensor::from_floats(
            [[0.3, 0.4, 0.5, 0.2], [0.1, 0.6, 0.3, 0.4]],
            &Default::default(),
        );

        let clipped_gradient = GradientClipping::Norm(2.2).clip_gradient(gradient.clone());

        clipped_gradient
            .into_data()
            .assert_eq(&gradient.into_data(), true);
    }
}