//! aprender-core 0.29.2
//!
//! Next-generation machine learning library in pure Rust.
// CONTRACT: kv-cache-sizing-v1.yaml
// HASH: sha256:f6789012345678ab
// Generated by: pv probar --binding
// DO NOT EDIT — regenerate with `pv probar --binding`

use proptest::prelude::*;

/// Per-token-per-layer KV cache bytes.
/// Factor of 2 for K + V.
/// Per-token-per-layer KV cache bytes.
///
/// One attention layer stores, per token, a key vector and a value
/// vector of `n_kv * d_k` elements each — hence the factor of 2.
/// All operands are widened to `u64` before multiplying, so the
/// product cannot overflow for any `u32` inputs within these ranges.
fn kv_bytes_per_token_layer(n_kv: u32, d_k: u32, bytes_per_element: u32) -> u64 {
    let head_bytes = u64::from(d_k) * u64::from(bytes_per_element);
    2 * u64::from(n_kv) * head_bytes
}

/// Total KV cache bytes.
/// Total KV cache bytes across all layers and sequence positions.
///
/// Multiplies the per-token-per-layer cost by `layers * seq_len`,
/// with every factor widened to `u64` so the product is exact.
fn kv_total_bytes(layers: u32, seq_len: u32, n_kv: u32, d_k: u32, bpe: u32) -> u64 {
    let per_token_layer = kv_bytes_per_token_layer(n_kv, d_k, bpe);
    u64::from(layers) * u64::from(seq_len) * per_token_layer
}

/// Count attention layers (not linear) for hybrid architecture.
/// Count attention layers (not linear) for a hybrid architecture.
///
/// `layer_types[i] == true` marks layer `i` as an attention layer;
/// the count is simply the number of `true` entries.
fn count_attention_layers(layer_types: &[bool]) -> usize {
    layer_types.iter().copied().filter(|&is_attn| is_attn).count()
}

// Property obligations from kv-cache-sizing-v1.yaml. Each test maps to one
// named obligation (invariant / monotonicity / bound / equivalence).
proptest! {
    /// Obligation: Per-token KV bytes (invariant)
    /// Formal: kv_bytes = 2 * n_kv * d_k * bpe
    ///
    /// NOTE(review): `expected` recomputes the same formula as the function
    /// under test, so this pins the contract's *shape* rather than checking
    /// against an independent oracle — regeneration from the contract YAML
    /// is what keeps both sides honest.
    #[test]
    fn prop_per_token_kv_bytes(
        n_kv in 1u32..32,
        d_k in 16u32..256,
        bpe in prop::sample::select(vec![2u32, 4])  // F16 or F32
    ) {
        let bytes = kv_bytes_per_token_layer(n_kv, d_k, bpe);
        let expected = 2 * n_kv as u64 * d_k as u64 * bpe as u64;
        prop_assert_eq!(
            bytes, expected,
            "KV bytes: {} != expected {} for n_kv={}, d_k={}, bpe={}",
            bytes, expected, n_kv, d_k, bpe
        );
        // Must be positive
        // (guaranteed because every strategy range starts at >= 1)
        prop_assert!(bytes > 0, "KV bytes must be > 0");
    }

    /// Obligation: KV total monotonic in sequence length (monotonicity)
    /// Formal: S1 < S2 => kv_total(S1) < kv_total(S2)
    ///
    /// Strict `<` is sound here: with every factor >= 1 the total scales
    /// linearly in seq_len, and the chosen ranges (seq1 < 4096, delta < 4096)
    /// keep seq1 + delta far below u32::MAX, so `saturating_add` never
    /// actually saturates and seq2 > seq1 always holds.
    #[test]
    fn prop_kv_total_monotonic_seq(
        layers in 1u32..64,
        seq1 in 1u32..4096,
        delta in 1u32..4096,
        n_kv in 1u32..16,
        d_k in 16u32..128
    ) {
        let seq2 = seq1.saturating_add(delta);
        // bpe fixed at 2 (F16); monotonicity is independent of element size.
        let total1 = kv_total_bytes(layers, seq1, n_kv, d_k, 2);
        let total2 = kv_total_bytes(layers, seq2, n_kv, d_k, 2);
        prop_assert!(
            total2 > total1,
            "not monotonic: seq1={} -> {} bytes, seq2={} -> {} bytes",
            seq1, total1, seq2, total2
        );
    }

    /// Obligation: Hybrid KV layers bounded (bound)
    /// Formal: kv_layers <= total_layers
    ///
    /// `count_attention_layers` filters a subset of `layer_types`, so the
    /// count can never exceed the slice length.
    #[test]
    fn prop_hybrid_kv_bounded(
        layer_types in proptest::collection::vec(proptest::bool::ANY, 2..64usize)
    ) {
        let total = layer_types.len();
        let attn = count_attention_layers(&layer_types);
        prop_assert!(
            attn <= total,
            "attention layers {} > total {}", attn, total
        );
    }

    /// Obligation: Bias absence (invariant)
    /// Formal: has_bias=false => 0 bias tensors
    ///
    /// Test: bias-free projection of zero input yields zero output.
    /// A bias term would show up as a nonzero output for zero input,
    /// so exact f32 equality with 0.0 is the right check here
    /// (0.0 * w == 0.0 exactly for finite w; weights are all 1.0).
    #[test]
    fn prop_zero_input_identity(
        rows in 2usize..32,
        cols in 2usize..32
    ) {
        // W @ zeros = zeros (no bias)
        let zeros = vec![0.0f32; cols];
        let weights = vec![1.0f32; rows * cols]; // any weights
        // Manual matmul: result[i] = sum_j(W[i,j] * 0) = 0
        // (row-major indexing: W[i,j] lives at weights[i * cols + j])
        let result: Vec<f32> = (0..rows)
            .map(|i| {
                (0..cols)
                    .map(|j| weights[i * cols + j] * zeros[j])
                    .sum()
            })
            .collect();

        for (i, &val) in result.iter().enumerate() {
            prop_assert!(
                val == 0.0,
                "bias-free W @ zeros != zeros at [{}]: {}", i, val
            );
        }
    }

    /// Obligation: KV total monotonic in layers (monotonicity)
    ///
    /// Same argument as the seq_len case: ranges keep layers1 + delta well
    /// below u32::MAX, so saturating_add never saturates and strict `>` holds.
    #[test]
    fn prop_kv_total_monotonic_layers(
        layers1 in 1u32..64,
        delta in 1u32..64,
        seq_len in 1u32..2048,
        n_kv in 1u32..16,
        d_k in 16u32..128
    ) {
        let layers2 = layers1.saturating_add(delta);
        let total1 = kv_total_bytes(layers1, seq_len, n_kv, d_k, 2);
        let total2 = kv_total_bytes(layers2, seq_len, n_kv, d_k, 2);
        prop_assert!(
            total2 > total1,
            "not monotonic: L1={} -> {}, L2={} -> {}",
            layers1, total1, layers2, total2
        );
    }

    /// Obligation: SIMD KV equivalence (equivalence)
    ///
    /// Intentionally ignored placeholder: sizing is scalar integer math with
    /// no SIMD implementation to compare against. Kept so the obligation
    /// stays visible in the generated test inventory.
    #[test]
    #[ignore = "SIMD equivalence — trueno domain"]
    fn prop_simd_equivalence(
        _x in proptest::collection::vec(0u8..=255, 1..32usize)
    ) {
        // KV cache sizing is pure math — no SIMD variant
    }
}