use aprender::autograd::Tensor;
use aprender::nn::CrossEntropyLoss;
use proptest::prelude::*;
proptest! {
#[test]
fn prop_non_negativity(
    logits_data in proptest::collection::vec(-10.0f32..10.0, 6..7usize),
    target in 0usize..3
) {
    // Property: cross-entropy is -log(p) with p in (0, 1], so the loss can
    // never be negative (a tiny tolerance below zero absorbs float error).
    let batch = 2;
    let classes = 3;

    // Bring the generated values to exactly batch*classes entries
    // (the strategy already yields 6; pad with zeros / truncate defensively).
    let mut values = logits_data;
    values.resize(batch * classes, 0.0);

    let logits = Tensor::new(&values, &[batch, classes]);
    // One label per batch row; the second is shifted so the rows differ.
    let labels = [target as f32, ((target + 1) % classes) as f32];
    let targets = Tensor::new(&labels, &[batch]);

    let val = CrossEntropyLoss::new().forward(&logits, &targets).data()[0];
    prop_assert!(
        val >= -1e-6,
        "CE loss={val}, expected >= 0"
    );
}
#[test]
fn prop_log_softmax_bounded_above(
    data in proptest::collection::vec(-10.0f32..10.0, 2..16usize)
) {
    // Property: log_softmax(x_i) = x_i - logsumexp(x). Since softmax(x_i) <= 1,
    // every component is <= 0; a tiny positive tolerance absorbs float error.
    //
    // Numerically stable log-sum-exp: shift by the max before exponentiating
    // so exp() stays in range.
    let max_val = data.iter().copied().fold(f32::NEG_INFINITY, f32::max);
    let log_sum_exp = max_val
        + data
            .iter()
            .map(|&x| (x - max_val).exp())
            .sum::<f32>()
            .ln();
    for (i, &x) in data.iter().enumerate() {
        let log_softmax_i = x - log_sum_exp;
        prop_assert!(
            log_softmax_i <= 1e-6,
            "log_softmax[{i}]={log_softmax_i}, expected <= 0"
        );
    }
}
#[test]
fn prop_decomposition_equivalence(
    logits_data in proptest::collection::vec(-5.0f32..5.0, 4..5usize),
    target in 0usize..4
) {
    // Property: the library's CE matches a manual reference computation
    //   CE = -(logit[target] - logsumexp(logits))
    // within a small float tolerance.
    let batch = 1;
    let classes = 4;

    // Exactly `classes` logits (strategy already yields 4; pad defensively).
    let mut padded = logits_data;
    padded.resize(classes, 0.0);

    let logits = Tensor::new(&padded, &[batch, classes]);
    let targets = Tensor::new(&[target as f32], &[batch]);
    let ce_val = CrossEntropyLoss::new().forward(&logits, &targets).data()[0];

    // Manual reference with the max-shift trick for numerical stability.
    let max_val = padded.iter().copied().fold(f32::NEG_INFINITY, f32::max);
    let shifted_sum: f32 = padded.iter().map(|&x| (x - max_val).exp()).sum();
    let log_sum_exp = max_val + shifted_sum.ln();
    // lse - x is exactly -(x - lse) in IEEE 754 arithmetic.
    let expected = log_sum_exp - padded[target];

    let diff = (ce_val - expected).abs();
    prop_assert!(
        diff < 1e-4,
        "CE={ce_val}, manual={expected}, diff={diff}"
    );
}
#[test]
fn prop_finite_output(
    logits_data in proptest::collection::vec(-50.0f32..50.0, 6..7usize),
    target in 0usize..3
) {
    // Property: finite logits must never yield NaN/inf loss. The wide
    // [-50, 50) range stresses the implementation's numerical stability.
    let batch = 2;
    let classes = 3;

    // Exactly batch*classes logits (strategy already yields 6; pad defensively).
    let mut values = logits_data;
    values.resize(batch * classes, 0.0);

    let logits = Tensor::new(&values, &[batch, classes]);
    let labels = [target as f32, ((target + 1) % classes) as f32];
    let targets = Tensor::new(&labels, &[batch]);

    let val = CrossEntropyLoss::new().forward(&logits, &targets).data()[0];
    prop_assert!(
        val.is_finite(),
        "CE loss={val} is not finite for finite inputs"
    );
}
#[test]
// Intentionally empty placeholder: per the ignore note, SIMD-vs-scalar
// equivalence presumably belongs to the `trueno` crate — confirm coverage
// exists there. The strategy argument is kept (and ignored via `_x`) so the
// test keeps a valid proptest signature if it is ever filled in.
#[ignore = "SIMD equivalence — trueno domain"]
fn prop_simd_equivalence(
    _x in proptest::collection::vec(-10.0f32..10.0, 1..32usize)
) {
}
}