use aprender::autograd::Tensor;
use aprender::loss::{HuberLoss, Loss};
use aprender::nn::loss::{BCEWithLogitsLoss, L1Loss, MSELoss, SmoothL1Loss};
use aprender::primitives::Vector;
use proptest::prelude::*;
proptest! {
#[test]
fn prop_mse_non_negativity(
    pred_data in proptest::collection::vec(-100.0f32..100.0, 1..64usize),
) {
    // FALSIFY-LF-001: MSE must be non-negative and finite for finite inputs.
    // Build a target that deviates from the prediction by a deterministic,
    // index-dependent offset so the loss is generally non-zero.
    let target_data: Vec<f32> = pred_data.iter()
        .enumerate()
        .map(|(i, &v)| v + (i as f32) * 0.1 - 3.0)
        .collect();
    let pred = Tensor::from_slice(&pred_data);
    // `target_data` has the same length as `pred_data` by construction,
    // so the former `[..n]` re-slice was redundant and is dropped.
    let target = Tensor::from_slice(&target_data);
    let loss = MSELoss::new().forward(&pred, &target);
    let val = loss.data()[0];
    prop_assert!(
        val >= -1e-6,
        "FALSIFY-LF-001: MSE loss={val}, expected >= 0"
    );
    prop_assert!(
        val.is_finite(),
        "FALSIFY-LF-001: MSE loss={val} is not finite for finite inputs"
    );
}
#[test]
fn prop_zero_at_perfect_prediction(
    data in proptest::collection::vec(-100.0f32..100.0, 1..64usize),
) {
    // FALSIFY-LF-002: when the prediction equals the target exactly, every
    // regression loss considered here must evaluate to zero.
    let pred = Tensor::from_slice(&data);
    let target = Tensor::from_slice(&data);

    let mse = MSELoss::new();
    let mse_val = mse.forward(&pred, &target).data()[0];
    prop_assert!(
        mse_val.abs() < 1e-6,
        "FALSIFY-LF-002: MSE at perfect prediction={mse_val}, expected 0"
    );

    let l1 = L1Loss::new();
    let l1_val = l1.forward(&pred, &target).data()[0];
    prop_assert!(
        l1_val.abs() < 1e-6,
        "FALSIFY-LF-002: L1 at perfect prediction={l1_val}, expected 0"
    );

    let smooth = SmoothL1Loss::new();
    let smooth_val = smooth.forward(&pred, &target).data()[0];
    prop_assert!(
        smooth_val.abs() < 1e-6,
        "FALSIFY-LF-002: SmoothL1 at perfect prediction={smooth_val}, expected 0"
    );
}
#[test]
fn prop_bce_non_negativity(
    logits_data in proptest::collection::vec(-10.0f32..10.0, 1..32usize),
) {
    // FALSIFY-LF-003: BCE-with-logits must be non-negative and finite.
    // Alternate 1/0 labels (1.0 at even indices) so both classes appear.
    let target_data: Vec<f32> = logits_data
        .iter()
        .enumerate()
        .map(|(i, _)| ((i + 1) % 2) as f32)
        .collect();
    let logits = Tensor::from_slice(&logits_data);
    let targets = Tensor::from_slice(&target_data);
    let val = BCEWithLogitsLoss::new().forward(&logits, &targets).data()[0];
    prop_assert!(
        val >= -1e-6,
        "FALSIFY-LF-003: BCE loss={val}, expected >= 0"
    );
    prop_assert!(
        val.is_finite(),
        "FALSIFY-LF-003: BCE loss={val} is not finite for finite inputs"
    );
}
#[test]
fn prop_l1_non_negativity_and_symmetry(
    a_data in proptest::collection::vec(-100.0f32..100.0, 1..64usize),
    b_data in proptest::collection::vec(-100.0f32..100.0, 1..64usize),
) {
    // FALSIFY-LF-004: L1 behaves like a distance — non-negative and
    // symmetric in its arguments. Truncate both inputs to a common length
    // so the tensors are comparable.
    let len = a_data.len().min(b_data.len());
    let a = Tensor::from_slice(&a_data[..len]);
    let b = Tensor::from_slice(&b_data[..len]);
    let criterion = L1Loss::new();
    let loss_ab = criterion.forward(&a, &b).data()[0];
    let loss_ba = criterion.forward(&b, &a).data()[0];
    let diff = (loss_ab - loss_ba).abs();
    prop_assert!(
        loss_ab >= -1e-6,
        "FALSIFY-LF-004: L1(a,b)={loss_ab}, expected >= 0"
    );
    prop_assert!(
        diff < 1e-5,
        "FALSIFY-LF-004: L1 not symmetric: L1(a,b)={loss_ab}, L1(b,a)={loss_ba}, diff={diff}"
    );
}
#[test]
fn prop_huber_non_negativity(
    pred_data in proptest::collection::vec(-50.0f32..50.0, 1..64usize),
    delta in 0.01f32..10.0,
) {
    // FALSIFY-LF-005: both Huber-style losses must be non-negative and
    // finite for any transition point `delta`. The target deviates from the
    // prediction by a deterministic, index-dependent offset so residuals
    // land on both sides of `delta`.
    let target_data: Vec<f32> = pred_data.iter()
        .enumerate()
        .map(|(i, &v)| v + (i as f32) * 0.3 - 1.5)
        .collect();
    // `target_data` has the same length as `pred_data` by construction,
    // so the former `[..n]` re-slices were redundant and are dropped.
    let pred_t = Tensor::from_slice(&pred_data);
    let target_t = Tensor::from_slice(&target_data);
    let smooth_val = SmoothL1Loss::with_beta(delta).forward(&pred_t, &target_t).data()[0];
    prop_assert!(
        smooth_val >= -1e-6,
        "FALSIFY-LF-005: SmoothL1Loss(delta={delta})={smooth_val}, expected >= 0"
    );
    prop_assert!(
        smooth_val.is_finite(),
        "FALSIFY-LF-005: SmoothL1Loss(delta={delta})={smooth_val} is not finite"
    );
    // Same property for the Vector-based HuberLoss implementation.
    let pred_v = Vector::from_slice(&pred_data);
    let target_v = Vector::from_slice(&target_data);
    let huber_val = HuberLoss::new(delta).compute(&pred_v, &target_v);
    prop_assert!(
        huber_val >= -1e-6,
        "FALSIFY-LF-005: HuberLoss(delta={delta})={huber_val}, expected >= 0"
    );
    prop_assert!(
        huber_val.is_finite(),
        "FALSIFY-LF-005: HuberLoss(delta={delta})={huber_val} is not finite"
    );
}
#[test]
fn prop_huber_zero_at_perfect(
    data in proptest::collection::vec(-50.0f32..50.0, 1..32usize),
    delta in 0.01f32..10.0,
) {
    // FALSIFY-LF-005b: both Huber variants must vanish when prediction and
    // target coincide, regardless of the transition point `delta`.
    let tensor = Tensor::from_slice(&data);
    let smooth_val = SmoothL1Loss::with_beta(delta)
        .forward(&tensor, &tensor)
        .data()[0];
    prop_assert!(
        smooth_val.abs() < 1e-6,
        "FALSIFY-LF-005b: SmoothL1(x,x,delta={delta})={smooth_val}, expected 0"
    );
    let vector = Vector::from_slice(&data);
    let huber_val = HuberLoss::new(delta).compute(&vector, &vector);
    prop_assert!(
        huber_val.abs() < 1e-6,
        "FALSIFY-LF-005b: Huber(x,x,delta={delta})={huber_val}, expected 0"
    );
}
#[test]
fn prop_mse_symmetry(
    a_data in proptest::collection::vec(-100.0f32..100.0, 1..64usize),
    b_data in proptest::collection::vec(-100.0f32..100.0, 1..64usize),
) {
    // FALSIFY-LF-001b: MSE depends only on the squared difference, so
    // swapping arguments must not change it (up to float round-off).
    let len = a_data.len().min(b_data.len());
    let a = Tensor::from_slice(&a_data[..len]);
    let b = Tensor::from_slice(&b_data[..len]);
    let criterion = MSELoss::new();
    let loss_ab = criterion.forward(&a, &b).data()[0];
    let loss_ba = criterion.forward(&b, &a).data()[0];
    let diff = (loss_ab - loss_ba).abs();
    prop_assert!(
        diff < 1e-4,
        "FALSIFY-LF-001b: MSE not symmetric: MSE(a,b)={loss_ab}, MSE(b,a)={loss_ba}, diff={diff}"
    );
}
#[test]
fn prop_bce_finite_output(
    logits_data in proptest::collection::vec(-50.0f32..50.0, 1..32usize),
) {
    // FALSIFY-LF-003b: BCE-with-logits stays finite across the full
    // logit range, including large magnitudes where a naive
    // log(sigmoid(x)) would overflow/underflow.
    let n = logits_data.len();
    // The original label pattern `(i * 7 + 3) % 3 == 0` reduces exactly to
    // `i % 3 == 0` (7 ≡ 1 and 3 ≡ 0 mod 3), so it only *looked*
    // pseudo-random; write the equivalent condition plainly.
    let target_data: Vec<f32> = (0..n)
        .map(|i| if i % 3 == 0 { 1.0 } else { 0.0 })
        .collect();
    let logits = Tensor::from_slice(&logits_data);
    let targets = Tensor::from_slice(&target_data);
    let loss = BCEWithLogitsLoss::new().forward(&logits, &targets);
    let val = loss.data()[0];
    prop_assert!(
        val.is_finite(),
        "FALSIFY-LF-003b: BCE={val} is not finite for finite logits in [-50,50]"
    );
}
}