// aprender-core 0.31.2
//
// Next-generation machine learning library in pure Rust
// CONTRACT: loss-functions-v1.yaml
// HASH: sha256:b7e9a3f1c2d84560
// Generated by: pv probar --binding
// DO NOT EDIT — regenerate with `pv probar --binding`

use aprender::autograd::Tensor;
use aprender::loss::{HuberLoss, Loss};
use aprender::nn::loss::{BCEWithLogitsLoss, L1Loss, MSELoss, SmoothL1Loss};
use aprender::primitives::Vector;
use proptest::prelude::*;

proptest! {
    /// FALSIFY-LF-001: MSE loss non-negativity
    /// Obligation: MSE(pred, target) >= 0 for all finite inputs
    /// Formal: ∀ pred, target ∈ ℝⁿ: mean((pred - target)²) ≥ 0
    #[test]
    fn prop_mse_non_negativity(
        pred_data in proptest::collection::vec(-100.0f32..100.0, 1..64usize),
    ) {
        // Derive a target vector that differs from the prediction by an
        // index-dependent shift (v + 0.1*i - 3.0), so pred != target in
        // general. (No reversal is involved, contrary to what an earlier
        // comment claimed.)
        let target_data: Vec<f32> = pred_data.iter()
            .enumerate()
            .map(|(i, &v)| v + (i as f32) * 0.1 - 3.0)
            .collect();

        let pred = Tensor::from_slice(&pred_data);
        // target_data has exactly pred_data.len() elements, so the shapes
        // already match — no re-slicing needed.
        let target = Tensor::from_slice(&target_data);

        let loss = MSELoss::new().forward(&pred, &target);
        let val = loss.data()[0];

        // A mean of squares can only be >= 0; allow a tiny tolerance for
        // float round-off in the reduction.
        prop_assert!(
            val >= -1e-6,
            "FALSIFY-LF-001: MSE loss={val}, expected >= 0"
        );
        prop_assert!(
            val.is_finite(),
            "FALSIFY-LF-001: MSE loss={val} is not finite for finite inputs"
        );
    }

    /// FALSIFY-LF-002: Zero loss at perfect prediction
    /// Obligation: L(x, x) = 0 for MSE, L1, and SmoothL1
    /// Formal: ∀ x ∈ ℝⁿ: MSE(x, x) = 0 ∧ L1(x, x) = 0 ∧ SmoothL1(x, x) = 0
    #[test]
    fn prop_zero_at_perfect_prediction(
        data in proptest::collection::vec(-100.0f32..100.0, 1..64usize),
    ) {
        // Identical tensors on both sides: every elementwise residual is
        // zero, so each loss must evaluate to (numerically) zero.
        let pred = Tensor::from_slice(&data);
        let target = Tensor::from_slice(&data);

        // Mean squared error over an all-zero residual vector.
        let mse_val = MSELoss::new().forward(&pred, &target).data()[0];
        prop_assert!(
            mse_val.abs() < 1e-6,
            "FALSIFY-LF-002: MSE at perfect prediction={mse_val}, expected 0"
        );

        // Mean absolute error over the same all-zero residuals.
        let l1_val = L1Loss::new().forward(&pred, &target).data()[0];
        prop_assert!(
            l1_val.abs() < 1e-6,
            "FALSIFY-LF-002: L1 at perfect prediction={l1_val}, expected 0"
        );

        // SmoothL1 with its default beta must also vanish at zero residual.
        let smooth_val = SmoothL1Loss::new().forward(&pred, &target).data()[0];
        prop_assert!(
            smooth_val.abs() < 1e-6,
            "FALSIFY-LF-002: SmoothL1 at perfect prediction={smooth_val}, expected 0"
        );
    }

    /// FALSIFY-LF-003: BCE with logits non-negativity
    /// Obligation: BCE(logits, targets) >= 0 for all finite logits and binary targets
    /// Formal: ∀ x ∈ ℝⁿ, y ∈ {0,1}ⁿ: max(x,0) - x·y + log(1+exp(-|x|)) ≥ 0
    #[test]
    fn prop_bce_non_negativity(
        logits_data in proptest::collection::vec(-10.0f32..10.0, 1..32usize),
    ) {
        let n = logits_data.len();
        // Deterministic binary targets alternating 1.0/0.0 by index parity.
        // (The targets are NOT derived from the logit values; only the
        // length is taken from the generated logits.)
        let target_data: Vec<f32> = (0..n)
            .map(|i| if i % 2 == 0 { 1.0 } else { 0.0 })
            .collect();

        let logits = Tensor::from_slice(&logits_data);
        let targets = Tensor::from_slice(&target_data);

        let loss = BCEWithLogitsLoss::new().forward(&logits, &targets);
        let val = loss.data()[0];

        // Cross-entropy against {0,1} targets is mathematically >= 0;
        // small tolerance covers float round-off in the reduction.
        prop_assert!(
            val >= -1e-6,
            "FALSIFY-LF-003: BCE loss={val}, expected >= 0"
        );
        prop_assert!(
            val.is_finite(),
            "FALSIFY-LF-003: BCE loss={val} is not finite for finite inputs"
        );
    }

    /// FALSIFY-LF-004: L1 loss non-negativity and symmetry
    /// Obligation: L1(a, b) >= 0 ∧ L1(a, b) = L1(b, a)
    /// Formal: ∀ a, b ∈ ℝⁿ: mean(|a - b|) ≥ 0 ∧ mean(|a - b|) = mean(|b - a|)
    #[test]
    fn prop_l1_non_negativity_and_symmetry(
        a_data in proptest::collection::vec(-100.0f32..100.0, 1..64usize),
        b_data in proptest::collection::vec(-100.0f32..100.0, 1..64usize),
    ) {
        // The two generated vectors may differ in length; truncate both to
        // the shorter one so the tensors share a shape.
        let len = a_data.len().min(b_data.len());
        let a = Tensor::from_slice(&a_data[..len]);
        let b = Tensor::from_slice(&b_data[..len]);

        let criterion = L1Loss::new();
        let loss_ab = criterion.forward(&a, &b).data()[0];
        let loss_ba = criterion.forward(&b, &a).data()[0];

        // mean(|a - b|) is a mean of absolute values, hence never negative.
        prop_assert!(
            loss_ab >= -1e-6,
            "FALSIFY-LF-004: L1(a,b)={loss_ab}, expected >= 0"
        );

        // Swapping arguments only flips each residual's sign, which the
        // absolute value erases — both directions must agree.
        let diff = (loss_ab - loss_ba).abs();
        prop_assert!(
            diff < 1e-5,
            "FALSIFY-LF-004: L1 not symmetric: L1(a,b)={loss_ab}, L1(b,a)={loss_ba}, diff={diff}"
        );
    }

    /// FALSIFY-LF-005: Huber loss non-negativity
    /// Obligation: Huber(pred, target, δ) >= 0 for all δ > 0
    /// Formal: ∀ pred, target ∈ ℝⁿ, δ > 0: huber_δ(pred, target) ≥ 0
    ///
    /// Tests both SmoothL1Loss (nn::loss, Tensor API) and HuberLoss (loss module, Vector API).
    #[test]
    fn prop_huber_non_negativity(
        pred_data in proptest::collection::vec(-50.0f32..50.0, 1..64usize),
        delta in 0.01f32..10.0,
    ) {
        // Build targets by offsetting each prediction with an
        // index-dependent shift (p + 0.3*idx - 1.5), so the residuals are
        // generally nonzero. target_data has the same length as pred_data.
        let target_data: Vec<f32> = pred_data
            .iter()
            .enumerate()
            .map(|(idx, &p)| p + (idx as f32) * 0.3 - 1.5)
            .collect();

        // Tensor-based Huber variant (SmoothL1Loss from nn::loss) with the
        // sampled delta as beta.
        let pred_t = Tensor::from_slice(&pred_data);
        let target_t = Tensor::from_slice(&target_data);
        let smooth_val = SmoothL1Loss::with_beta(delta)
            .forward(&pred_t, &target_t)
            .data()[0];

        prop_assert!(
            smooth_val >= -1e-6,
            "FALSIFY-LF-005: SmoothL1Loss(delta={delta})={smooth_val}, expected >= 0"
        );
        prop_assert!(
            smooth_val.is_finite(),
            "FALSIFY-LF-005: SmoothL1Loss(delta={delta})={smooth_val} is not finite"
        );

        // Vector-based variant (HuberLoss from the loss module) on the same
        // data and the same delta.
        let pred_v = Vector::from_slice(&pred_data);
        let target_v = Vector::from_slice(&target_data);
        let huber_val = HuberLoss::new(delta).compute(&pred_v, &target_v);

        prop_assert!(
            huber_val >= -1e-6,
            "FALSIFY-LF-005: HuberLoss(delta={delta})={huber_val}, expected >= 0"
        );
        prop_assert!(
            huber_val.is_finite(),
            "FALSIFY-LF-005: HuberLoss(delta={delta})={huber_val} is not finite"
        );
    }

    /// FALSIFY-LF-005b: Huber loss zero at perfect prediction
    /// Obligation: Huber(x, x, δ) = 0 for all δ > 0
    #[test]
    fn prop_huber_zero_at_perfect(
        data in proptest::collection::vec(-50.0f32..50.0, 1..32usize),
        delta in 0.01f32..10.0,
    ) {
        // Tensor API: SmoothL1 with the sampled beta, identical tensor on
        // both sides, so every residual is exactly zero.
        let tensor = Tensor::from_slice(&data);
        let smooth_val = SmoothL1Loss::with_beta(delta)
            .forward(&tensor, &tensor)
            .data()[0];
        prop_assert!(
            smooth_val.abs() < 1e-6,
            "FALSIFY-LF-005b: SmoothL1(x,x,delta={delta})={smooth_val}, expected 0"
        );

        // Vector API: HuberLoss with the same delta, identical vector on
        // both sides.
        let vector = Vector::from_slice(&data);
        let huber_val = HuberLoss::new(delta).compute(&vector, &vector);
        prop_assert!(
            huber_val.abs() < 1e-6,
            "FALSIFY-LF-005b: Huber(x,x,delta={delta})={huber_val}, expected 0"
        );
    }

    /// FALSIFY-LF-001b: MSE symmetry
    /// Obligation: MSE(a, b) = MSE(b, a)
    /// Formal: mean((a-b)²) = mean((b-a)²)
    #[test]
    fn prop_mse_symmetry(
        a_data in proptest::collection::vec(-100.0f32..100.0, 1..64usize),
        b_data in proptest::collection::vec(-100.0f32..100.0, 1..64usize),
    ) {
        // Match shapes by truncating both inputs to the shorter length.
        let len = a_data.len().min(b_data.len());
        let a = Tensor::from_slice(&a_data[..len]);
        let b = Tensor::from_slice(&b_data[..len]);

        let criterion = MSELoss::new();
        let loss_ab = criterion.forward(&a, &b).data()[0];
        let loss_ba = criterion.forward(&b, &a).data()[0];

        // Squaring erases the sign of each residual, so argument order must
        // not matter beyond float round-off.
        let diff = (loss_ab - loss_ba).abs();
        prop_assert!(
            diff < 1e-4,
            "FALSIFY-LF-001b: MSE not symmetric: MSE(a,b)={loss_ab}, MSE(b,a)={loss_ba}, diff={diff}"
        );
    }

    /// FALSIFY-LF-003b: BCE finite output for finite inputs
    /// Obligation: finite(logits) ∧ valid(targets) → finite(BCE)
    #[test]
    fn prop_bce_finite_output(
        logits_data in proptest::collection::vec(-50.0f32..50.0, 1..32usize),
    ) {
        let n = logits_data.len();
        // Deterministic binary targets: 1.0 whenever (i*7 + 3) is divisible
        // by 3 (i.e. every third index, since (i*7 + 3) % 3 == i % 3),
        // 0.0 otherwise. Not random, and not an index-parity pattern.
        let target_data: Vec<f32> = (0..n)
            .map(|i| if (i * 7 + 3) % 3 == 0 { 1.0 } else { 0.0 })
            .collect();

        let logits = Tensor::from_slice(&logits_data);
        let targets = Tensor::from_slice(&target_data);

        // Logits bounded in [-50, 50]: a numerically stable
        // BCE-with-logits must produce a finite scalar here.
        let loss = BCEWithLogitsLoss::new().forward(&logits, &targets);
        let val = loss.data()[0];

        prop_assert!(
            val.is_finite(),
            "FALSIFY-LF-003b: BCE={val} is not finite for finite logits in [-50,50]"
        );
    }
}