//! aprender-core 0.31.2
//!
//! Next-generation machine learning library in pure Rust.
// CONTRACT: lora-algebra-v1.yaml
// HASH: sha256:d6e7f89012345678
// Generated by: pv probar --binding
// DO NOT EDIT — regenerate with `pv probar --binding`

use proptest::prelude::*;

proptest! {
    /// Obligation: Task vector roundtrip (invariant)
    /// Formal: base + (fine - base) == fine (checked to a 1e-5 absolute tolerance)
    #[test]
    fn prop_task_vector_roundtrip(
        base in proptest::collection::vec(-10.0f32..10.0, 4..16usize),
        delta in proptest::collection::vec(-1.0f32..1.0, 4..16usize)
    ) {
        let n = base.len().min(delta.len());
        let fine: Vec<f32> = base[..n].iter().zip(&delta[..n]).map(|(b, d)| b + d).collect();

        // Task vector: delta = fine - base
        let task_vec: Vec<f32> = fine.iter().zip(&base[..n]).map(|(f, b)| f - b).collect();

        // Roundtrip: base + task_vec == fine
        for i in 0..n {
            let reconstructed = base[i] + task_vec[i];
            let diff = (reconstructed - fine[i]).abs();
            prop_assert!(
                diff < 1e-5,
                "roundtrip failed at [{}]: base+delta={}, fine={}, diff={}",
                i, reconstructed, fine[i], diff
            );
        }
    }
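
    /// Supplemental sketch (hand-written, not contract-generated): merging
    /// two task vectors by plain element-wise addition should commute up to
    /// f32 rounding. This assumes the same unscaled merge rule exercised by
    /// `prop_task_vector_roundtrip` above.
    #[test]
    fn prop_task_vector_merge_commutes(
        base in proptest::collection::vec(-10.0f32..10.0, 4..16usize),
        d1 in proptest::collection::vec(-1.0f32..1.0, 4..16usize),
        d2 in proptest::collection::vec(-1.0f32..1.0, 4..16usize)
    ) {
        let n = base.len().min(d1.len()).min(d2.len());
        for i in 0..n {
            // (base + d1) + d2 vs (base + d2) + d1: equal up to a few ULP
            let ab = (base[i] + d1[i]) + d2[i];
            let ba = (base[i] + d2[i]) + d1[i];
            prop_assert!(
                (ab - ba).abs() < 1e-5,
                "merge order changed result at [{}]: {} vs {}", i, ab, ba
            );
        }
    }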

    /// Obligation: Eckart-Young bound (bound)
    /// Formal: ||M - M_r||_F = sqrt(sum_{i>r} sigma_i^2) for the rank-r SVD
    /// truncation M_r (the spectral-norm form is ||M - M_r||_2 = sigma_{r+1})
    ///
    /// We test a simple consequence: a rank-1 matrix (outer product) has
    /// sigma_2 = 0, so its Frobenius norm must equal sigma_1 = ||u|| * ||v||.
    #[test]
    fn prop_eckart_young_rank1(
        u in proptest::collection::vec(-5.0f32..5.0, 2..8usize),
        v in proptest::collection::vec(-5.0f32..5.0, 2..8usize)
    ) {
        let m = u.len();
        let n = v.len();

        // Rank-1 matrix: M = u * v^T
        let mut matrix = vec![0.0f32; m * n];
        for i in 0..m {
            for j in 0..n {
                matrix[i * n + j] = u[i] * v[j];
            }
        }

        // For a rank-1 matrix, sigma_1 = ||u|| * ||v|| and sigma_2 = 0,
        // so ||M||_F = sqrt(sigma_1^2 + sigma_2^2) collapses to sigma_1.
        let u_norm: f32 = u.iter().map(|x| x * x).sum::<f32>().sqrt();
        let v_norm: f32 = v.iter().map(|x| x * x).sum::<f32>().sqrt();
        let sigma_1 = u_norm * v_norm;

        // Frobenius norm of M should equal sigma_1
        let frob: f32 = matrix.iter().map(|x| x * x).sum::<f32>().sqrt();
        let diff = (frob - sigma_1).abs();
        prop_assert!(
            diff < 1e-3,
            "||M||_F={} != sigma_1={}, diff={}", frob, sigma_1, diff
        );
    }
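
    /// Supplemental sketch (hand-written, not contract-generated): for
    /// M = u v^T the rank-1 SVD truncation has the closed form
    /// sigma_1 * u_hat * v_hat^T, so the reconstruction error should vanish.
    /// Near-zero factors are rejected to keep the normalization stable.
    #[test]
    fn prop_eckart_young_rank1_reconstruction(
        u in proptest::collection::vec(-5.0f32..5.0, 2..8usize),
        v in proptest::collection::vec(-5.0f32..5.0, 2..8usize)
    ) {
        let u_norm: f32 = u.iter().map(|x| x * x).sum::<f32>().sqrt();
        let v_norm: f32 = v.iter().map(|x| x * x).sum::<f32>().sqrt();
        prop_assume!(u_norm > 1e-2 && v_norm > 1e-2);

        let sigma_1 = u_norm * v_norm;
        let mut err_sq = 0.0f32;
        for i in 0..u.len() {
            for j in 0..v.len() {
                let m_ij = u[i] * v[j];
                // M_1[i,j] = sigma_1 * (u[i]/||u||) * (v[j]/||v||) == M[i,j]
                let m1_ij = sigma_1 * (u[i] / u_norm) * (v[j] / v_norm);
                err_sq += (m_ij - m1_ij) * (m_ij - m1_ij);
            }
        }
        prop_assert!(
            err_sq.sqrt() < 1e-3,
            "||M - M_1||_F = {} for a rank-1 M", err_sq.sqrt()
        );
    }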

    /// Obligation: LoRA shape compatibility (invariant)
    /// Formal: A=[m,r], B=[r,n] => A@B=[m,n]
    #[test]
    fn prop_lora_shape(
        m in 2usize..64,
        n in 2usize..64,
        r in 1usize..16
    ) {
        // LoRA decomposition: W[m,n] ≈ A[m,r] @ B[r,n]
        let a_shape = (m, r);
        let b_shape = (r, n);
        let result_shape = (a_shape.0, b_shape.1);

        prop_assert_eq!(
            result_shape, (m, n),
            "A{:?} @ B{:?} = {:?}, expected ({}, {})",
            a_shape, b_shape, result_shape, m, n
        );

        // Storage savings: r*(m+n) < m*n when r is small
        let lora_storage = r * (m + n);
        let full_storage = m * n;
        if r < m.min(n) / 2 {
            prop_assert!(
                lora_storage < full_storage,
                "no savings: lora={} >= full={} at r={}",
                lora_storage, full_storage, r
            );
        }
    }
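
    /// Supplemental sketch (hand-written, not contract-generated): the exact
    /// break-even rank for LoRA storage is r_max = floor((m*n - 1) / (m + n)),
    /// the largest r with r*(m+n) < m*n. This tightens the r < min(m,n)/2
    /// heuristic used in `prop_lora_shape` above.
    #[test]
    fn prop_lora_breakeven_rank(
        m in 2usize..64,
        n in 2usize..64
    ) {
        // For tiny near-square shapes (e.g. 2x2) r_max is 0: no rank saves.
        let r_max = (m * n - 1) / (m + n);

        // At r_max the factors are still strictly smaller than the matrix...
        prop_assert!(
            r_max * (m + n) < m * n,
            "r_max={} gives no savings for {}x{}", r_max, m, n
        );
        // ...and one more unit of rank erases the savings.
        prop_assert!(
            (r_max + 1) * (m + n) >= m * n,
            "r_max={} is not maximal for {}x{}", r_max, m, n
        );
    }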

    /// Obligation: Shape preservation (invariant)
    /// Formal: merged shape == base shape
    #[test]
    fn prop_shape_preservation(
        m in 2usize..64,
        n in 2usize..64,
        _r in 1usize..8
    ) {
        // base: [m, n]
        // delta (LoRA): A[m, r] @ B[r, n] = [m, n]
        // merged = base + scale * A @ B
        let base_shape = (m, n);
        let lora_output_shape = (m, n); // A[m,r] @ B[r,n]
        let merged_shape = base_shape; // element-wise add preserves shape

        prop_assert_eq!(
            merged_shape, base_shape,
            "merge changed shape: {:?} != {:?}", merged_shape, base_shape
        );
        prop_assert_eq!(
            lora_output_shape, base_shape,
            "LoRA output shape {:?} != base {:?}", lora_output_shape, base_shape
        );
    }
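
    /// Supplemental sketch (hand-written, not contract-generated): a concrete
    /// merge over flat row-major buffers, assuming merged = base + scale * (A @ B)
    /// with constant-valued inputs. Dimensions are kept small so the
    /// O(m*r*n) matmul stays cheap under proptest's default case count.
    #[test]
    fn prop_merge_numeric_shape(
        m in 2usize..8,
        n in 2usize..8,
        r in 1usize..4,
        scale in 0.1f32..2.0
    ) {
        let base = vec![1.0f32; m * n];
        let a = vec![0.5f32; m * r];
        let b = vec![0.5f32; r * n];

        // delta = A @ B, row-major
        let mut delta = vec![0.0f32; m * n];
        for i in 0..m {
            for j in 0..n {
                for k in 0..r {
                    delta[i * n + j] += a[i * r + k] * b[k * n + j];
                }
            }
        }

        let merged: Vec<f32> =
            base.iter().zip(&delta).map(|(w, d)| w + scale * d).collect();
        prop_assert_eq!(merged.len(), m * n, "merged buffer size changed");

        // With constant inputs every merged entry is 1 + scale * 0.25 * r
        let expect = 1.0 + scale * 0.25 * (r as f32);
        for (idx, &val) in merged.iter().enumerate() {
            prop_assert!(
                (val - expect).abs() < 1e-5,
                "entry [{}] = {}, expected {}", idx, val, expect
            );
        }
    }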

    /// Obligation: DARE unbiasedness (invariant)
    /// Formal: E[DARE(delta, p)] = delta; verified here as the algebraic
    /// identity (1 - p) * (1 / (1 - p)) * delta == delta
    #[test]
    fn prop_dare_unbiased(
        delta_val in -5.0f32..5.0,
        p_percent in 1u32..50  // dropout percentage 1-49%
    ) {
        let p = p_percent as f64 / 100.0;
        let scale = 1.0 / (1.0 - p);

        // DARE: drop with probability p, rescale by 1/(1-p)
        // E[DARE(x, p)] = (1-p) * scale * x = x
        let expected = delta_val as f64;
        let dare_expected = (1.0 - p) * scale * delta_val as f64;

        let diff = (dare_expected - expected).abs();
        prop_assert!(
            diff < 1e-10,
            "DARE not unbiased: E[DARE({}, {})]={}, expected {}, diff={}",
            delta_val, p, dare_expected, expected, diff
        );
    }
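
    /// Supplemental sketch (hand-written, not contract-generated): an
    /// empirical companion to the algebraic check above, using a tiny inline
    /// LCG as the dropout source. p and the sample count are fixed; the
    /// tolerance is a loose ~10-sigma band, so spurious failures should be
    /// vanishingly rare.
    #[test]
    fn prop_dare_unbiased_empirical(
        seed in any::<u64>(),
        delta_val in 0.5f32..5.0
    ) {
        let p = 0.25f64;
        let scale = 1.0 / (1.0 - p);
        let n = 100_000u64;

        let mut state = seed;
        let mut sum = 0.0f64;
        for _ in 0..n {
            // 64-bit LCG step (Knuth's MMIX constants), mapped to [0, 1)
            state = state
                .wrapping_mul(6364136223846793005)
                .wrapping_add(1442695040888963407);
            let uniform = (state >> 11) as f64 / (1u64 << 53) as f64;
            // drop with probability p, rescale survivors by 1/(1-p)
            if uniform >= p {
                sum += scale * delta_val as f64;
            }
        }

        let mean = sum / n as f64;
        let tol = 0.02 * delta_val as f64; // ~10 std errors at n = 100k
        prop_assert!(
            (mean - delta_val as f64).abs() < tol,
            "empirical mean {} not within {} of {}", mean, tol, delta_val
        );
    }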

    /// Obligation: SIMD LoRA equivalence (equivalence)
    #[test]
    #[ignore = "SIMD equivalence — trueno domain"]
    fn prop_simd_equivalence(
        _x in proptest::collection::vec(-10.0f32..10.0, 1..32usize)
    ) {
        // LoRA algebra is pure math — no SIMD variant
    }
}