#[cfg(test)]
mod tests {
use super::super::*;
#[test]
fn falsify_ce_001_non_negativity() {
    // CE-001: cross-entropy is bounded below by zero for every sample,
    // whatever the sign or magnitude of the logits. Reduction::None
    // exposes each per-sample loss so every element can be checked.
    let criterion = CrossEntropyLoss::with_reduction(Reduction::None);
    // (flat logits, class-index targets, num_classes) per case.
    let cases: Vec<(Vec<f32>, Vec<f32>, usize)> = vec![
        (vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0], vec![0.0, 2.0], 3),
        (vec![0.0, 0.0, 0.0, 0.0], vec![0.0, 1.0], 2),
        (vec![-10.0, 10.0, -10.0, 10.0], vec![1.0, 1.0], 2),
        (vec![100.0, -100.0, 0.0], vec![0.0], 3),
    ];
    for (i, (raw_logits, raw_targets, classes)) in cases.iter().enumerate() {
        let rows = raw_targets.len();
        let logits = Tensor::new(raw_logits, &[rows, *classes]);
        let targets = Tensor::new(raw_targets, &[rows]);
        let per_sample = criterion.forward(&logits, &targets);
        for (j, &val) in per_sample.data().iter().enumerate() {
            assert!(
                val >= -1e-6,
                "FALSIFIED CE-001 case {i}[{j}]: CE = {val} < 0"
            );
        }
    }
}
#[test]
fn falsify_ce_006_perfect_prediction() {
    // CE-006: when the target-class logit dominates the others by ~100
    // units, the target's softmax probability is ~1, so each per-sample
    // cross-entropy should be numerically indistinguishable from zero.
    let criterion = CrossEntropyLoss::with_reduction(Reduction::None);
    // Two samples, three classes; the correct class gets +50, the rest -50.
    let logits = Tensor::new(&[50.0, -50.0, -50.0, -50.0, 50.0, -50.0], &[2, 3]);
    let targets = Tensor::new(&[0.0, 1.0], &[2]);
    let per_sample = criterion.forward(&logits, &targets);
    let data = per_sample.data();
    for i in 0..data.len() {
        let val = data[i];
        assert!(
            val < 1e-3,
            "FALSIFIED CE-006: CE for near-perfect prediction [{i}] = {val}, expected ≈ 0"
        );
    }
}
#[test]
fn falsify_ce_003_numerical_stability() {
    // CE-003: extreme logit magnitudes (±1000, ±500) must not push the
    // reduced loss to inf/NaN — a naive exp() would overflow here.
    let criterion = CrossEntropyLoss::new();
    // (flat logits, class-index targets, num_classes) per case.
    let extreme_cases: [(Vec<f32>, Vec<f32>, usize); 3] = [
        (vec![1000.0, -1000.0, 0.0], vec![0.0], 3),
        (vec![-500.0, -500.0, -500.0], vec![1.0], 3),
        (vec![0.0, 0.0, 0.0, 0.0], vec![2.0], 4),
    ];
    for (i, (raw_logits, raw_targets, classes)) in extreme_cases.iter().enumerate() {
        let rows = raw_targets.len();
        let logits = Tensor::new(raw_logits, &[rows, *classes]);
        let targets = Tensor::new(raw_targets, &[rows]);
        // Default reduction collapses the batch to a single scalar.
        let val = criterion.forward(&logits, &targets).data()[0];
        assert!(
            val.is_finite(),
            "FALSIFIED CE-003 case {i}: CE = {val} (not finite)"
        );
    }
}
#[test]
fn falsify_ce_001b_uniform_logits() {
    // CE-001b: identical logits yield a uniform softmax, so the
    // cross-entropy for any target class equals ln(C) exactly.
    let criterion = CrossEntropyLoss::with_reduction(Reduction::None);
    for &num_classes in &[2_usize, 3, 5, 10] {
        let logits = Tensor::new(&vec![1.0; num_classes], &[1, num_classes]);
        let targets = Tensor::new(&[0.0], &[1]);
        let val = criterion.forward(&logits, &targets).data()[0];
        let expected = (num_classes as f32).ln();
        assert!(
            (val - expected).abs() < 1e-4,
            "FALSIFIED CE-001b: CE(uniform, C={num_classes}) = {val}, expected log({num_classes}) = {expected}"
        );
    }
}
#[test]
fn falsify_ce_002_log_softmax_upper_bound() {
    // CE-002: log_softmax(x) <= 0 element-wise, hence CE = -log_softmax >= 0.
    // For all-zero logits the softmax is uniform and CE equals ln(C)
    // exactly, so the value can be pinned from BOTH sides. The original
    // test only checked the lower bound, which any non-negative constant
    // output would satisfy — the upper-bound assertion below makes the
    // test actually falsifying.
    let criterion = CrossEntropyLoss::with_reduction(Reduction::None);
    for &nc in &[2_usize, 3, 5, 10, 50] {
        let logits_data = vec![0.0; nc];
        let logits = Tensor::new(&logits_data, &[1, nc]);
        let targets = Tensor::new(&[0.0], &[1]);
        let loss = criterion.forward(&logits, &targets);
        let val = loss.data()[0];
        // Lower bound: CE can never be negative.
        assert!(
            val >= -1e-6,
            "FALSIFIED CE-002: CE for uniform logits C={nc} = {val} < 0"
        );
        // Tight upper bound: uniform logits ⇒ CE = ln(C) exactly.
        let ln_c = (nc as f32).ln();
        assert!(
            val <= ln_c + 1e-4,
            "FALSIFIED CE-002: CE for uniform logits C={nc} = {val} exceeds ln(C) = {ln_c}"
        );
    }
}
mod ce_proptest_falsify {
    use super::super::super::*;
    use proptest::prelude::*;

    proptest! {
        #![proptest_config(ProptestConfig::with_cases(200))]
        // CE-001 (property form): cross-entropy stays non-negative for
        // deterministic pseudo-random logits over many class counts.
        #[test]
        fn falsify_ce_001_prop_non_negativity(
            nc in 2..=10usize,
            target in 0..10usize,
            seed in 0..1000u32,
        ) {
            // Fold the raw target draw into a valid class index.
            let target = target % nc;
            // sin-based generator: deterministic per (seed, index), range ±10.
            let mut raw = Vec::with_capacity(nc);
            for i in 0..nc {
                raw.push(((i as f32 + seed as f32) * 0.37).sin() * 10.0);
            }
            let criterion = CrossEntropyLoss::with_reduction(Reduction::None);
            let logits = Tensor::new(&raw, &[1, nc]);
            let targets = Tensor::new(&[target as f32], &[1]);
            let val = criterion.forward(&logits, &targets).data()[0];
            prop_assert!(
                val >= -1e-6,
                "FALSIFIED CE-001-prop: CE = {} < 0 (nc={}, target={})",
                val, nc, target
            );
        }
    }

    proptest! {
        #![proptest_config(ProptestConfig::with_cases(200))]
        // CE-003 (property form): the mean-reduced loss remains finite
        // for cos-based logits scaled up to 100.
        #[test]
        fn falsify_ce_003_prop_finite_output(
            nc in 2..=10usize,
            target in 0..10usize,
            scale in 0.1f32..100.0,
            seed in 0..1000u32,
        ) {
            let target = target % nc;
            // cos-based generator: deterministic per (seed, index), range ±scale.
            let mut raw = Vec::with_capacity(nc);
            for i in 0..nc {
                raw.push(((i as f32 + seed as f32) * 0.73).cos() * scale);
            }
            let criterion = CrossEntropyLoss::new();
            let logits = Tensor::new(&raw, &[1, nc]);
            let targets = Tensor::new(&[target as f32], &[1]);
            let val = criterion.forward(&logits, &targets).data()[0];
            prop_assert!(
                val.is_finite(),
                "FALSIFIED CE-003-prop: CE = {} (not finite), nc={}, scale={}",
                val, nc, scale
            );
        }
    }

    proptest! {
        #![proptest_config(ProptestConfig::with_cases(100))]
        // CE-006 (property form): a +50 logit against -50 everywhere else
        // drives the target probability to ~1, so the loss must be ~0.
        #[test]
        fn falsify_ce_006_prop_perfect_prediction(
            nc in 2..=10usize,
            target in 0..10usize,
        ) {
            let target = target % nc;
            let mut raw = vec![-50.0; nc];
            raw[target] = 50.0;
            let criterion = CrossEntropyLoss::with_reduction(Reduction::None);
            let logits = Tensor::new(&raw, &[1, nc]);
            let targets = Tensor::new(&[target as f32], &[1]);
            let val = criterion.forward(&logits, &targets).data()[0];
            prop_assert!(
                val < 1e-3,
                "FALSIFIED CE-006-prop: CE = {} for dominant logit, expected ≈ 0",
                val
            );
        }
    }
}
}