//! aprender-core 0.30.0
//!
//! Next-generation machine learning library in pure Rust.
// CONTRACT: quantization-ordering-v1.yaml
// HASH: sha256:e7f8901234567890
// Generated by: pv probar --binding
// DO NOT EDIT — regenerate with `pv probar --binding`

use proptest::prelude::*;

/// Bytes per parameter for each quantization scheme.
/// Based on GGML superblock sizes.
/// Bytes required to store `params` parameters under the given
/// quantization scheme, using simplified GGML block sizes.
///
/// Returns 0 for an unrecognized scheme name.
fn bytes_for_scheme(params: u64, scheme: &str) -> u64 {
    // Each scheme is a (bytes, elements) ratio applied with integer math.
    // Q4_K: 144 B / 256-elem superblock = 0.5625 B/param (18 B per 32 elems)
    // Q6_K: 210 B / 256-elem superblock = 0.8203 B/param (26 B per 32 elems)
    // Q8_0: 34 B / 32-elem block       = 1.0625 B/param
    // F16 / F32: plain 2 / 4 bytes per parameter.
    let (bytes_per_block, elems_per_block) = match scheme {
        "Q4K" => (18, 32),
        "Q6K" => (26, 32),
        "Q8_0" => (34, 32),
        "F16" => (2, 1),
        "F32" => (4, 1),
        // Unknown scheme: contributes nothing (matches original sentinel).
        _ => (0, 1),
    };
    params * bytes_per_block / elems_per_block
}

/// LoRA alpha scaling factor.
/// LoRA alpha scaling factor: `alpha / rank`.
///
/// Standard LoRA scales the low-rank update by this factor
/// (e.g. alpha = 16, rank = 64 gives 0.25).
fn lora_scale(alpha: f32, rank: u32) -> f32 {
    let rank_f = rank as f32;
    alpha / rank_f
}

proptest! {
    /// Obligation: Size ordering strict (monotonicity)
    /// Formal: Q4K < Q6K < Q8_0 < F16 < F32 bytes for same param count
    #[test]
    fn prop_size_ordering(
        params_base in 1u64..1_000_000
    ) {
        // Use multiples of 256 (one full superblock) so integer division
        // inside bytes_for_scheme cannot round two schemes to equal sizes.
        let params = params_base * 256;

        let q4k = bytes_for_scheme(params, "Q4K");
        let q6k = bytes_for_scheme(params, "Q6K");
        let q8 = bytes_for_scheme(params, "Q8_0");
        let f16 = bytes_for_scheme(params, "F16");
        let f32_bytes = bytes_for_scheme(params, "F32");

        // Strict ordering: each scheme must be strictly smaller than the next.
        prop_assert!(q4k > 0, "Q4K bytes must be > 0");
        prop_assert!(q4k < q6k, "Q4K({}) >= Q6K({}) for params={}", q4k, q6k, params);
        prop_assert!(q6k < q8, "Q6K({}) >= Q8_0({}) for params={}", q6k, q8, params);
        prop_assert!(q8 < f16, "Q8_0({}) >= F16({}) for params={}", q8, f16, params);
        prop_assert!(f16 < f32_bytes, "F16({}) >= F32({}) for params={}", f16, f32_bytes, params);
    }

    /// Obligation: Alpha scaling correctness (invariant)
    /// Formal: output scaled by exactly alpha/rank
    #[test]
    fn prop_alpha_scaling(
        alpha in 1.0f32..128.0,
        rank in 1u32..256
    ) {
        let scale = lora_scale(alpha, rank);
        let expected = alpha / rank as f32;
        let diff = (scale - expected).abs();
        prop_assert!(
            diff < 1e-6,
            "lora_scale({}, {})={}, expected {}", alpha, rank, scale, expected
        );

        // Standard case: alpha=16, rank=64 => 0.25
        let standard = lora_scale(16.0, 64);
        let std_diff = (standard - 0.25).abs();
        prop_assert!(
            std_diff < 1e-6,
            "standard scale={}, expected 0.25", standard
        );
    }

    /// Obligation: Dropout expectation (invariant)
    /// Formal: E[mask] = 1 - p within statistical tolerance
    #[test]
    fn prop_dropout_expectation(
        p_percent in 1u32..50
    ) {
        // Drop probability as a fraction; range is (0, 0.5) so keep_prob
        // is bounded away from zero and the rescale never divides by ~0.
        let p = p_percent as f64 / 100.0;

        // DARE rescale: drop with prob p, rescale survivors by 1/(1-p)
        // E[kept * scale] = (1-p) * 1/(1-p) = 1.0
        let keep_prob = 1.0 - p;
        let rescale = 1.0 / keep_prob;
        let expected_contribution = keep_prob * rescale;

        let diff = (expected_contribution - 1.0).abs();
        prop_assert!(
            diff < 1e-10,
            "DARE expectation={}, expected 1.0 for p={}",
            expected_contribution, p
        );
    }

    /// Obligation: Concrete Qwen3.5 sizes (bound)
    /// Formal: 9B params: Q4K~5GB, Q6K~7GB, Q8~9GB, F16~18GB (within 20%)
    #[test]
    fn prop_concrete_sizes(
        _dummy in 0u8..1
    ) {
        let params: u64 = 9_000_000_000; // 9B parameters

        let q4k_gb = bytes_for_scheme(params, "Q4K") as f64 / 1e9;
        let q6k_gb = bytes_for_scheme(params, "Q6K") as f64 / 1e9;
        let q8_gb = bytes_for_scheme(params, "Q8_0") as f64 / 1e9;
        let f16_gb = bytes_for_scheme(params, "F16") as f64 / 1e9;

        // Within 20% of expected, matching the obligation above.
        // Actual relative errors: Q4K 1.3%, Q6K 4.5%, Q8_0 6.3%, F16 0%.
        prop_assert!(
            (q4k_gb - 5.0).abs() / 5.0 < 0.20,
            "Q4K: {} GB, expected ~5 GB", q4k_gb
        );
        prop_assert!(
            (q6k_gb - 7.0).abs() / 7.0 < 0.20,
            "Q6K: {} GB, expected ~7 GB", q6k_gb
        );
        prop_assert!(
            (q8_gb - 9.0).abs() / 9.0 < 0.20,
            "Q8_0: {} GB, expected ~9 GB", q8_gb
        );
        prop_assert!(
            (f16_gb - 18.0).abs() / 18.0 < 0.05,
            "F16: {} GB, expected ~18 GB", f16_gb
        );
    }

    /// Obligation: SIMD quantization equivalence (equivalence)
    #[test]
    #[ignore = "SIMD equivalence — trueno domain"]
    fn prop_simd_equivalence(
        _x in proptest::collection::vec(0u8..=255, 1..32usize)
    ) {
        // Quantization ordering is pure math — no SIMD variant to compare.
    }
}