//! aprender-core 0.31.2
//!
//! Next-generation machine learning library in pure Rust
// CONTRACT: rmsnorm-kernel-v1.yaml
// HASH: sha256:a1c7f2e8d3b49061
// Generated by: pv probar --binding
// DO NOT EDIT — regenerate with `pv probar --binding`

use aprender::autograd::Tensor;
use aprender::nn::Module;
use aprender::nn::RMSNorm;
use proptest::prelude::*;

proptest! {
    /// Obligation: Output is finite (invariant)
    /// Formal: |RMSNorm(x)_i| < infinity for all i when eps > 0
    ///
    /// Feeds bounded random vectors through RMSNorm and checks that every
    /// output element is finite (no NaN/Inf).
    #[test]
    fn prop_output_is_finite(
        data in proptest::collection::vec(-100.0f32..100.0, 1..64usize)
    ) {
        let n = data.len();
        let norm = RMSNorm::new(&[n]);
        let x = Tensor::new(&data, &[1, n]);
        let y = norm.forward(&x);
        for (i, &val) in y.data().iter().enumerate() {
            prop_assert!(
                val.is_finite(),
                "output[{i}]={val} is not finite"
            );
        }
    }

    /// Obligation: Scale invariance (invariant)
    /// Formal: RMSNorm(alpha*x) = sign(alpha)*RMSNorm(x) for alpha != 0
    ///
    /// Uses the affine-free variant so no learned gamma interferes with the
    /// mathematical identity being checked.
    #[test]
    fn prop_scale_invariance(
        data in proptest::collection::vec(-10.0f32..10.0, 2..32usize),
        alpha in prop::num::f32::NORMAL.prop_filter(
            "nonzero alpha",
            |a| a.abs() > 0.01 && a.abs() < 100.0
        )
    ) {
        let n = data.len();
        let norm = RMSNorm::without_affine(&[n]);
        let x = Tensor::new(&data, &[1, n]);
        let scaled: Vec<f32> = data.iter().map(|&v| v * alpha).collect();
        let x_scaled = Tensor::new(&scaled, &[1, n]);

        let y_orig = norm.forward(&x);
        let y_scaled = norm.forward(&x_scaled);

        let sign = alpha.signum();
        // Tolerance depends only on alpha, so compute it once outside the
        // element loop. Relax it for small alpha (FP32 precision degrades
        // near zero).
        let tol = if alpha.abs() < 0.1 { 0.05 } else { 1e-3 };
        let orig_data = y_orig.data();
        let scaled_data = y_scaled.data();
        // Both outputs have length n; zip avoids per-element bounds checks.
        for (i, (&o, &s)) in orig_data.iter().zip(scaled_data.iter()).enumerate() {
            let expected = sign * o;
            let diff = (expected - s).abs();
            prop_assert!(
                diff < tol,
                "scale invariance: sign*y[{i}]={expected} vs y_scaled[{i}]={s}, diff={diff}"
            );
        }
    }

    /// Obligation: RMS denominator is positive (bound)
    /// Formal: RMS(x) > 0 when eps > 0
    ///
    /// Checked indirectly: a zero/negative denominator would surface as
    /// NaN/Inf in the forward output.
    #[test]
    fn prop_rms_denominator_positive(
        data in proptest::collection::vec(-100.0f32..100.0, 1..64usize)
    ) {
        let n = data.len();
        let norm = RMSNorm::new(&[n]);
        let x = Tensor::new(&data, &[1, n]);
        let y = norm.forward(&x);
        // If RMS denominator were zero/negative, output would be NaN/Inf
        for &val in y.data() {
            prop_assert!(
                val.is_finite(),
                "non-finite output implies RMS denominator issue"
            );
        }
    }

    /// Obligation: SIMD matches scalar within ULP (equivalence)
    ///
    /// Intentionally ignored: SIMD/scalar equivalence is owned by the
    /// trueno crate and verified there.
    #[test]
    #[ignore = "SIMD equivalence — trueno domain"]
    fn prop_simd_matches_scalar_within_ulp(
        _x in proptest::collection::vec(-100.0f32..100.0, 1..32usize)
    ) {
        // SIMD equivalence testing is trueno's responsibility
    }

    /// Obligation: Normalized RMS approximately 1 (idempotency)
    /// Formal: RMS(RMSNorm(x)/gamma) approximately 1 when gamma = 1
    ///
    /// Inputs are filtered to be non-degenerate so the RMS of the raw
    /// input is bounded away from zero.
    #[test]
    fn prop_normalized_rms_approx_1(
        data in proptest::collection::vec(-10.0f32..10.0, 2..32usize)
            .prop_filter("non-zero", |d| d.iter().any(|v| v.abs() > 0.01))
    ) {
        let n = data.len();
        let norm = RMSNorm::without_affine(&[n]);
        let x = Tensor::new(&data, &[1, n]);
        let y = norm.forward(&x);
        let y_data = y.data();

        // Compute RMS of normalized output
        let sum_sq: f32 = y_data.iter().map(|v| v * v).sum();
        let rms = (sum_sq / n as f32).sqrt();

        prop_assert!(
            (rms - 1.0).abs() < 0.1,
            "RMS of normalized output = {rms}, expected ~1.0"
        );
    }
}

// =========================================================================
// FALSIFY-NORM-001..003: normalization-kernel-v1.yaml falsification tests
// =========================================================================

/// FALSIFY-NORM-001: RMSNorm output has unit RMS (variance ≈ 1).
#[test]
fn falsify_norm_001_rmsnorm_unit_rms() {
    let data: Vec<f32> = (0..128).map(|i| (i as f32 - 64.0) * 0.1).collect();
    let n = data.len();
    let norm = RMSNorm::new(&[n]);
    let x = Tensor::new(&data, &[1, n]);
    let y = norm.forward(&x);
    let sum_sq: f32 = y.data().iter().map(|v| v * v).sum();
    let rms = (sum_sq / n as f32).sqrt();
    assert!(
        (rms - 1.0).abs() < 0.15,
        "FALSIFY-NORM-001: RMS={rms}, expected ~1.0"
    );
}

/// FALSIFY-NORM-002: RMSNorm zero input produces finite output.
#[test]
fn falsify_norm_002_rmsnorm_zero_input() {
    let n = 64;
    let norm = RMSNorm::new(&[n]);
    let x = Tensor::new(&vec![0.0f32; n], &[1, n]);
    let y = norm.forward(&x);
    for &v in y.data() {
        assert!(v.is_finite(), "FALSIFY-NORM-002: NaN/Inf from zero input");
    }
}

/// FALSIFY-NORM-003: RMSNorm preserves sign structure.
#[test]
fn falsify_norm_003_rmsnorm_sign_preservation() {
    let data = vec![1.0f32, -1.0, 2.0, -2.0, 0.5, -0.5];
    let n = data.len();
    let norm = RMSNorm::new(&[n]);
    let x = Tensor::new(&data, &[1, n]);
    let y = norm.forward(&x);
    // With gamma=1 (default), signs should be preserved
    for i in 0..n {
        let input_sign = data[i].signum();
        let output_sign = y.data()[i].signum();
        assert_eq!(
            input_sign,
            output_sign,
            "FALSIFY-NORM-003: sign flipped at [{i}]: input={}, output={}",
            data[i],
            y.data()[i]
        );
    }
}