use proptest::prelude::*;
/// Shape parameters describing a model's tensor layout, used to drive the
/// tensor-count helpers below.
#[derive(Debug, Clone)]
struct TensorConfig {
    /// Number of transformer layers.
    layers: u32,
    /// Whether bias tensors are present (adds 3 tensors per layer).
    has_bias: bool,
    /// Whether separate Q/K norm tensors are present (adds 2 per layer).
    has_qk_norm: bool,
    /// Norms per layer; the first two are included in the per-layer base,
    /// each one beyond that adds one tensor.
    num_norms: u32,
    /// Tied embeddings drop one top-level tensor (2 instead of 3).
    tied_embeddings: bool,
}
/// Number of tensors contributed by a single layer of `cfg`.
///
/// Starts from 7 fixed per-layer tensors, then adds 3 for bias, 2 for Q/K
/// norm, and 1 for every norm beyond the standard two.
fn per_layer_tensors(cfg: &TensorConfig) -> u32 {
    let mut count = 7; // fixed per-layer tensors
    if cfg.has_bias {
        count += 3;
    }
    if cfg.has_qk_norm {
        count += 2;
    }
    // `saturating_sub` keeps counts with fewer than two norms at zero extra.
    count + cfg.num_norms.saturating_sub(2)
}
/// Total tensor count for `cfg`: a small global base plus the per-layer
/// contribution times the layer count.
///
/// Tied embeddings reduce the global base from 3 tensors to 2.
fn total_tensors(cfg: &TensorConfig) -> u32 {
    let global = if cfg.tied_embeddings { 2 } else { 3 };
    global + cfg.layers * per_layer_tensors(cfg)
}
/// Approximate serialized size in bytes of `params` parameters under the
/// named quantization `scheme`.
///
/// K-quant and Q8_0 sizes are expressed as bytes-per-32-parameters ratios
/// (e.g. Q4K stores 32 params in 18 bytes); F16/F32 are plain 2 and 4 bytes
/// per parameter. Unrecognized schemes yield 0.
fn quant_bytes(params: u64, scheme: &str) -> u64 {
    if scheme == "Q4K" {
        params * 18 / 32
    } else if scheme == "Q6K" {
        params * 26 / 32
    } else if scheme == "Q8_0" {
        params * 34 / 32
    } else if scheme == "F16" {
        params * 2
    } else if scheme == "F32" {
        params * 4
    } else {
        // Unknown scheme: size is reported as zero rather than an error.
        0
    }
}
fn config_strategy() -> impl Strategy<Value = TensorConfig> {
(
1u32..100,
proptest::bool::ANY,
proptest::bool::ANY,
2u32..4,
proptest::bool::ANY,
)
.prop_map(
|(layers, has_bias, has_qk_norm, num_norms, tied)| TensorConfig {
layers,
has_bias,
has_qk_norm,
num_norms,
tied_embeddings: tied,
},
)
}
// Property suite covering the tensor-count helpers and quantization byte
// ordering. Kept as a single `proptest!` invocation; each `#[test]` below is
// expanded by the macro into a harness-driven property test.
proptest! {
// NOTE(review): this property recomputes the exact formula that
// `total_tensors` implements, so it mirrors the implementation rather than
// checking it independently — it mainly guards against accidental edits to
// either side of the formula.
#[test]
fn prop_tensor_count_formula(
cfg in config_strategy()
) {
let total = total_tensors(&cfg);
// Reconstruct the expected count: global base (2 tied / 3 untied) plus
// per-layer tensors times the layer count.
let base = if cfg.tied_embeddings { 2u32 } else { 3u32 };
let expected = base + cfg.layers * per_layer_tensors(&cfg);
prop_assert_eq!(
total, expected,
"tensor count mismatch: total={}, expected base({}) + {}*{}={}",
total, base, cfg.layers, per_layer_tensors(&cfg), expected
);
// Sanity: layers >= 1 and base >= 2 guarantee a positive count.
prop_assert!(total > 0, "tensor count must be > 0, got {}", total);
}
// Changing only per-layer architecture flags (bias / qk-norm) between two
// configs with identical layer counts must shift the total by exactly
// layers * (per-layer delta) — the global base cancels out.
#[test]
fn prop_architecture_delta(
layers in 1u32..100,
a_bias in proptest::bool::ANY,
b_bias in proptest::bool::ANY,
a_qk in proptest::bool::ANY,
b_qk in proptest::bool::ANY
) {
let cfg_a = TensorConfig {
layers,
has_bias: a_bias,
has_qk_norm: a_qk,
num_norms: 2,
tied_embeddings: false,
};
let cfg_b = TensorConfig {
layers,
has_bias: b_bias,
has_qk_norm: b_qk,
num_norms: 2,
tied_embeddings: false,
};
let total_a = total_tensors(&cfg_a);
let total_b = total_tensors(&cfg_b);
// Widen to i64 so the b-minus-a delta can go negative without underflow.
let per_layer_a = per_layer_tensors(&cfg_a) as i64;
let per_layer_b = per_layer_tensors(&cfg_b) as i64;
let actual_delta = total_b as i64 - total_a as i64;
let expected_delta = layers as i64 * (per_layer_b - per_layer_a);
prop_assert_eq!(
actual_delta, expected_delta,
"delta mismatch: actual={}, expected L({}) * ({} - {}) = {}",
actual_delta, layers, per_layer_b, per_layer_a, expected_delta
);
}
// Tying the embeddings must remove exactly one tensor, regardless of layer
// count or per-layer flags (only the global base changes: 3 -> 2).
#[test]
fn prop_tied_embedding_count(
layers in 1u32..100,
has_bias in proptest::bool::ANY,
has_qk_norm in proptest::bool::ANY
) {
let untied = TensorConfig {
layers,
has_bias,
has_qk_norm,
num_norms: 2,
tied_embeddings: false,
};
let tied = TensorConfig {
layers,
has_bias,
has_qk_norm,
num_norms: 2,
tied_embeddings: true,
};
// u32 subtraction is safe here: untied total is always >= tied total.
let diff = total_tensors(&untied) - total_tensors(&tied);
prop_assert_eq!(
diff, 1,
"tied vs untied diff={}, expected 1", diff
);
}
// Byte sizes must be strictly ordered Q4K < Q6K < Q8_0 < F16 < F32.
// params is forced to a multiple of 32 so the `/ 32` block-size ratios in
// `quant_bytes` divide exactly, making strict inequality safe.
#[test]
fn prop_quant_byte_ordering(
params_base in 1u64..1_000_000
) {
let params = params_base * 32;
let q4k = quant_bytes(params, "Q4K");
let q6k = quant_bytes(params, "Q6K");
let q8 = quant_bytes(params, "Q8_0");
let f16 = quant_bytes(params, "F16");
let f32_bytes = quant_bytes(params, "F32");
prop_assert!(
q4k < q6k,
"Q4K({}) >= Q6K({}) for params={}", q4k, q6k, params
);
prop_assert!(
q6k < q8,
"Q6K({}) >= Q8_0({}) for params={}", q6k, q8, params
);
prop_assert!(
q8 < f16,
"Q8_0({}) >= F16({}) for params={}", q8, f16, params
);
prop_assert!(
f16 < f32_bytes,
"F16({}) >= F32({}) for params={}", f16, f32_bytes, params
);
}
// Placeholder: SIMD equivalence belongs to another crate's domain; kept
// ignored so the property name is reserved without running anything.
#[test]
#[ignore = "SIMD equivalence — trueno domain"]
fn prop_simd_equivalence(
_x in proptest::collection::vec(0u32..100, 1..32usize)
) {
}
}