use core::f32::consts::PI;

use burn_tensor::cast::ToElement;

use crate as burn;
use crate::module::{Content, DisplaySettings, ModuleDisplay};
use crate::tensor::Tensor;
use crate::tensor::backend::Backend;
use crate::{config::Config, module::Module};

use super::Reduction;

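/// Configuration for creating a [PoissonNllLoss] instance: the negative log likelihood
/// loss for a target that follows a Poisson distribution.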
#[derive(Config, Debug)]
pub struct PoissonNllLossConfig {
    /// If `true`, the predictions are expected to be in log-space.
    #[config(default = true)]
    pub log_input: bool,
    /// Whether to add the Stirling approximation term to the loss.
    #[config(default = false)]
    pub full: bool,
    /// Small value added inside the logarithm to avoid evaluating `ln(0)` when
    /// `log_input` is `false`.
    #[config(default = 1e-8)]
    pub eps: f64,
}

impl PoissonNllLossConfig {
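    /// Initialize [PoissonNllLoss] from this configuration.
    ///
    /// # Panics
    ///
    /// Panics if `eps` is not a positive number.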
    pub fn init(&self) -> PoissonNllLoss {
        self.assertions();
        PoissonNllLoss {
            log_input: self.log_input,
            full: self.full,
            eps: self.eps,
        }
    }

    fn assertions(&self) {
        assert!(
            self.eps > 0.,
            "eps for PoissonNllLoss must be a positive number."
        );
    }
}

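/// Negative log likelihood loss for a target that follows a Poisson distribution.
///
/// With `log_input = true` the element-wise loss is `exp(predictions) - targets * predictions`;
/// with `log_input = false` it is `predictions - targets * ln(predictions + eps)`.
/// When `full` is enabled, the Stirling approximation of `ln(targets!)` is added
/// wherever `targets > 1`.
///
/// Typically created with [PoissonNllLossConfig::init].
///
/// # Example
///
/// A minimal usage sketch; `MyBackend` stands in for whatever backend type you use and is
/// not defined in this module:
///
/// ```ignore
/// let device = Default::default();
/// let loss_fn = PoissonNllLossConfig::new().init();
///
/// // Predictions are in log-space by default (`log_input = true`).
/// let predictions = Tensor::<MyBackend, 1>::from_floats([0.0, 1.0, 2.0], &device);
/// let targets = Tensor::<MyBackend, 1>::from_floats([1.0, 0.0, 2.0], &device);
///
/// // Mean-reduced loss over all elements.
/// let loss = loss_fn.forward(predictions, targets, Reduction::Mean);
/// ```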
#[derive(Module, Debug, Clone)]
#[module(custom_display)]
pub struct PoissonNllLoss {
    /// If `true`, the predictions are expected to be in log-space.
    pub log_input: bool,
    /// Whether the Stirling approximation term is added to the loss.
    pub full: bool,
    /// Small value added inside the logarithm to avoid evaluating `ln(0)` when
    /// `log_input` is `false`.
    pub eps: f64,
}

impl ModuleDisplay for PoissonNllLoss {
    fn custom_settings(&self) -> Option<DisplaySettings> {
        DisplaySettings::new()
            .with_new_line_after_attribute(false)
            .optional()
    }

    fn custom_content(&self, content: Content) -> Option<Content> {
        content
            .add("log_input", &self.log_input)
            .add("full", &self.full)
            .add("eps", &self.eps)
            .optional()
    }
}

impl PoissonNllLoss {
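    /// Compute the criterion on the input tensors and reduce it to a single-element tensor.
    ///
    /// `Reduction::Auto` behaves like `Reduction::Mean`.
    ///
    /// # Shapes
    ///
    /// - predictions: `[...dims]`
    /// - targets: `[...dims]`
    /// - output: `[1]`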
    pub fn forward<const D: usize, B: Backend>(
        &self,
        predictions: Tensor<B, D>,
        targets: Tensor<B, D>,
        reduction: Reduction,
    ) -> Tensor<B, 1> {
        let loss = self.forward_no_reduction(predictions, targets);
        match reduction {
            Reduction::Mean | Reduction::Auto => loss.mean(),
            Reduction::Sum => loss.sum(),
        }
    }

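    /// Compute the criterion on the input tensors without reducing, returning the
    /// element-wise loss.
    ///
    /// # Shapes
    ///
    /// - predictions: `[...dims]`
    /// - targets: `[...dims]`
    /// - output: `[...dims]`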
    pub fn forward_no_reduction<const D: usize, B: Backend>(
        &self,
        predictions: Tensor<B, D>,
        targets: Tensor<B, D>,
    ) -> Tensor<B, D> {
        self.assertions(&predictions, &targets);
        // Element-wise negative log likelihood of the Poisson distribution.
        let mut loss = if self.log_input {
            predictions.clone().exp() - targets.clone() * predictions
        } else {
            predictions.clone() - targets.clone() * (predictions + self.eps).log()
        };
        if self.full {
            // Stirling approximation of ln(targets!), applied only where targets > 1.
            let log_stirling_term = targets.clone() * targets.clone().log() - targets.clone()
                + (targets.clone() * 2. * PI).log() * 0.5;
            loss = loss
                + log_stirling_term
                    .mask_where(targets.clone().lower_equal_elem(1), targets.zeros_like());
        }
        loss
    }

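    /// Validate the inputs: the shapes of `predictions` and `targets` must match,
    /// `targets` must be non-negative, and, when `log_input` is `false`, `predictions`
    /// must be non-negative as well.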
    fn assertions<const D: usize, B: Backend>(
        &self,
        predictions: &Tensor<B, D>,
        targets: &Tensor<B, D>,
    ) {
        let predictions_dims = predictions.dims();
        let targets_dims = targets.dims();
        assert!(
            predictions_dims == targets_dims,
            "Shape of targets ({targets_dims:?}) should match the shape of predictions ({predictions_dims:?})."
        );
        assert!(
            targets
                .clone()
                .greater_equal_elem(0.)
                .all()
                .into_scalar()
                .to_bool(),
            "All the values of `targets` must be non-negative."
        );
        if !self.log_input {
            assert!(
                predictions
                    .clone()
                    .greater_equal_elem(0.)
                    .all()
                    .into_scalar()
                    .to_bool(),
                "When `log_input` is `false`, all the values of `predictions` must be non-negative."
            );
        }
    }
}

#[cfg(test)]
mod tests {
    #![allow(clippy::approx_constant)]

    use super::*;
    use crate::TestBackend;
    use crate::tensor::TensorData;
    type TestTensor<const D: usize> = Tensor<TestBackend, D>;
    use burn_tensor::{Tolerance, ops::FloatElem};
    type FT = FloatElem<TestBackend>;

    #[test]
    fn test_poisson_nll_loss() {
        let predictions = TensorData::from([0., 0., -40., 1., 2., 3.]);
        let targets = TensorData::from([1., 4.5, 2.5, 0., 0., 2.]);

        let device = Default::default();

        let predictions = TestTensor::<1>::from_data(predictions, &device);
        let targets = TestTensor::<1>::from_data(targets, &device);

        let poisson = PoissonNllLossConfig::new().init();

        let loss_sum = poisson.forward(predictions.clone(), targets.clone(), Reduction::Sum);
        let loss = poisson.forward(predictions.clone(), targets.clone(), Reduction::Auto);
        let loss_no_reduction = poisson.forward_no_reduction(predictions, targets);

        let expected = TensorData::from([1.0000, 1.0000, 100.0000, 2.7183, 7.3891, 14.0855]);
        loss_no_reduction
            .into_data()
            .assert_approx_eq::<FT>(&expected, Tolerance::default());

        let expected = TensorData::from([21.0321]);
        loss.into_data()
            .assert_approx_eq::<FT>(&expected, Tolerance::default());

        let expected = TensorData::from([126.1929]);
        loss_sum
            .into_data()
            .assert_approx_eq::<FT>(&expected, Tolerance::default());
    }

    #[test]
    fn test_poisson_nll_loss_no_log_input() {
        let predictions = TensorData::from([0.0, 0.5, 1.0, 1.0, 2.71828, 7.38905, 20.0855]);
        let targets = TensorData::from([2., 3., 1., 4.5, 0., 0., 2.]);

        let device = Default::default();

        let predictions = TestTensor::<1>::from_data(predictions, &device);
        let targets = TestTensor::<1>::from_data(targets, &device);

        let poisson = PoissonNllLossConfig::new().with_log_input(false).init();

        let loss_no_reduction = poisson.forward_no_reduction(predictions.clone(), targets.clone());

        let expected = TensorData::from([36.84136, 2.579441, 1.0, 1.0, 2.71828, 7.38905, 14.0855]);
        loss_no_reduction
            .into_data()
            .assert_approx_eq::<FT>(&expected, Tolerance::default());
    }

    #[test]
    fn test_poisson_nll_loss_full() {
        let predictions = TensorData::from([0., 0., -40., 1., 2., 3.]);
        let targets = TensorData::from([1., 4.5, 2.5, 0., 0., 2.]);

        let device = Default::default();

        let predictions = TestTensor::<1>::from_data(predictions, &device);
        let targets = TestTensor::<1>::from_data(targets, &device);

        let poisson = PoissonNllLossConfig::new().with_full(true).init();

        let loss_sum = poisson.forward(predictions.clone(), targets.clone(), Reduction::Sum);
        let loss = poisson.forward(predictions.clone(), targets.clone(), Reduction::Auto);
        let loss_no_reduction = poisson.forward_no_reduction(predictions, targets);

        let expected = TensorData::from([1.0000, 4.9393, 101.1678, 2.7183, 7.3891, 14.7373]);
        loss_no_reduction
            .into_data()
            .assert_approx_eq::<FT>(&expected, Tolerance::default());

        let expected = TensorData::from([21.9920]);
        loss.into_data()
            .assert_approx_eq::<FT>(&expected, Tolerance::default());

        let expected = TensorData::from([131.9518]);
        loss_sum
            .into_data()
            .assert_approx_eq::<FT>(&expected, Tolerance::default());
    }

    #[cfg(feature = "std")]
    #[test]
    fn test_poisson_nll_loss_gradients() {
        type TestAutodiffTensor = Tensor<crate::TestAutodiffBackend, 1>;

        let predictions = TensorData::from([0., 0., -40., 1., 2., 3.]);
        let targets = TensorData::from([1., 4.5, 2.5, 0., 0., 2.]);

        let device = Default::default();

        let predictions1 = TestAutodiffTensor::from_data(predictions, &device).require_grad();
        let predictions2 = predictions1.clone();
        let targets = TestAutodiffTensor::from_data(targets, &device);

        let poisson = PoissonNllLossConfig::new().with_full(false).init();
        let poisson_full = PoissonNllLossConfig::new().with_full(true).init();

        let loss_sum = poisson.forward(predictions1.clone(), targets.clone(), Reduction::Sum);
        let loss_full_sum =
            poisson_full.forward(predictions2.clone(), targets.clone(), Reduction::Sum);

        let grads = loss_sum.backward();
        let grads_full = loss_full_sum.backward();

        let grads_predictions1 = predictions1.grad(&grads).unwrap();
        let grads_predictions2 = predictions2.grad(&grads_full).unwrap();

        let expected = TensorData::from([0.0000, -3.5000, -2.5000, 2.7183, 7.3891, 18.0855]);

        grads_predictions1
            .into_data()
            .assert_approx_eq::<FT>(&expected, Tolerance::default());
        grads_predictions2
            .into_data()
            .assert_approx_eq::<FT>(&expected, Tolerance::default());
    }

    #[test]
    #[should_panic = "eps for PoissonNllLoss must be a positive number."]
    fn test_negative_eps() {
        let _poisson = PoissonNllLossConfig::new().with_eps(0.).init();
    }

    #[test]
    #[should_panic = "All the values of `targets` must be non-negative."]
    fn test_targets_with_negative_values() {
        let predictions = TensorData::from([0., 0., -40., 1., 2., 3., 4.]);
        let targets = TensorData::from([1., 4.5, 2.5, 0., 0., 2., -0.42]);

        let device = Default::default();

        let predictions = TestTensor::<1>::from_data(predictions, &device);
        let targets = TestTensor::<1>::from_data(targets, &device);

        let poisson = PoissonNllLossConfig::new().init();

        let _loss = poisson.forward(predictions.clone(), targets.clone(), Reduction::Auto);
    }

    #[test]
    #[should_panic = "Shape of targets"]
    fn test_shape_tensors() {
        let predictions = TensorData::from([0., 1., 2.]);
        let targets = TensorData::from([0., 1.]);

        let device = Default::default();

        let predictions = TestTensor::<1>::from_data(predictions, &device);
        let targets = TestTensor::<1>::from_data(targets, &device);

        let poisson = PoissonNllLossConfig::new().init();

        let _loss = poisson.forward_no_reduction(predictions.clone(), targets.clone());
    }

    #[test]
    #[should_panic = "When `log_input` is `false`, all the values of `predictions` must be non-negative."]
    fn test_exp_predictions_non_negative() {
        let predictions = TensorData::from([0.3, -0.1, 0.4]);
        let targets = TensorData::from([0., 1., 0.]);

        let device = Default::default();

        let predictions = TestTensor::<1>::from_data(predictions, &device);
        let targets = TestTensor::<1>::from_data(targets, &device);

        let poisson = PoissonNllLossConfig::new().with_log_input(false).init();

        let _loss = poisson.forward_no_reduction(predictions.clone(), targets.clone());
    }

    #[test]
    fn display() {
        let config = PoissonNllLossConfig::new();
        let loss = config.init();

        assert_eq!(
            alloc::format!("{loss}"),
            "PoissonNllLoss {log_input: true, full: false, eps: 0.00000001}"
        );
    }
}