#[cfg(test)]
mod tests {
use super::*;
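// Validation test suite: covers the standalone validators
// (`validate_embedding`, `validate_weight`), the validated wrapper types
// (`ValidatedEmbedding`, `ValidatedWeight`, `ValidatedVector`), and
// whole-model validation via `ValidatedAprTransformer`.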
#[test]
fn test_validates_good_embedding() {
let vocab_size = 100;
let hidden_dim = 64;
let data: Vec<f32> = (0..vocab_size * hidden_dim)
.map(|i| (i as f32 * 0.01).sin() * 0.1)
.collect();
let result = validate_embedding("test", &data, vocab_size, hidden_dim);
assert!(
result.passed,
"Good embedding should pass: {:?}",
result.failures
);
}
#[test]
fn test_rejects_mostly_zero_embedding() {
let vocab_size = 100;
let hidden_dim = 64;
let mut data = vec![0.0f32; vocab_size * hidden_dim];
for i in (vocab_size * 95 / 100 * hidden_dim)..(vocab_size * hidden_dim) {
data[i] = 0.1;
}
let result = validate_embedding("test", &data, vocab_size, hidden_dim);
assert!(!result.passed, "95% zero embedding should fail");
assert!(result.failures.iter().any(|f| f.contains("DENSITY")));
}
#[test]
fn test_rejects_nan_embedding() {
let vocab_size = 10;
let hidden_dim = 8;
let mut data: Vec<f32> = vec![0.1; vocab_size * hidden_dim];
data[5] = f32::NAN;
let result = validate_embedding("test", &data, vocab_size, hidden_dim);
assert!(!result.passed, "NaN embedding should fail");
}
#[test]
fn test_rejects_wrong_shape() {
let data = vec![0.1f32; 1000];
let result = validate_embedding("test", &data, 100, 64);
assert!(!result.passed, "Wrong shape should fail");
}
#[test]
fn falsify_001_validated_embedding_rejects_all_zeros() {
let bad_data = vec![0.0f32; 100 * 64];
let result = ValidatedEmbedding::new(bad_data, 100, 64);
assert!(result.is_err(), "Should reject 100% zeros");
let err = result.unwrap_err();
assert!(err.message.contains("DENSITY"), "Error: {}", err.message);
}
#[test]
fn falsify_001_validated_embedding_rejects_94pct_zeros() {
let vocab_size = 1000;
let hidden_dim = 64;
let mut data = vec![0.0f32; vocab_size * hidden_dim];
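// Leave only tokens 945..1000 nonzero: 94.5% of rows remain zero.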
for i in (945 * hidden_dim)..(vocab_size * hidden_dim) {
data[i] = 0.1;
}
let result = ValidatedEmbedding::new(data, vocab_size, hidden_dim);
assert!(result.is_err(), "Should reject 94.5% zeros");
}
#[test]
fn falsify_001_validated_embedding_accepts_good_data() {
let vocab_size = 100;
let hidden_dim = 64;
let data: Vec<f32> = (0..vocab_size * hidden_dim)
.map(|i| (i as f32 * 0.01).sin() * 0.1)
.collect();
let result = ValidatedEmbedding::new(data, vocab_size, hidden_dim);
assert!(
result.is_ok(),
"Should accept good data: {:?}",
result.err()
);
}
#[test]
fn falsify_003_validated_embedding_rejects_nan() {
let vocab_size = 10;
let hidden_dim = 8;
let mut data: Vec<f32> = (0..vocab_size * hidden_dim)
.map(|i| i as f32 * 0.01)
.collect();
data[5] = f32::NAN;
let result = ValidatedEmbedding::new(data, vocab_size, hidden_dim);
assert!(result.is_err(), "Should reject NaN");
}
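// F-DATA-QUALITY-004 spot-checks individual token rows, so an all-zero row
// hidden in otherwise healthy data must still be caught.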
#[test]
fn falsify_004_spot_check_catches_offset_bug() {
let vocab_size = 100;
let hidden_dim = 64;
let mut data: Vec<f32> = (0..vocab_size * hidden_dim)
.map(|i| (i as f32 * 0.01).sin() * 0.1)
.collect();
let token_10_start = 10 * hidden_dim;
for i in token_10_start..(token_10_start + hidden_dim) {
data[i] = 0.0;
}
let result = ValidatedEmbedding::new(data, vocab_size, hidden_dim);
assert!(result.is_err(), "Should catch zero token at 10%");
assert_eq!(result.unwrap_err().rule_id, "F-DATA-QUALITY-004");
}
#[test]
fn falsify_005_rejects_wrong_shape() {
let data = vec![0.1f32; 1000];
let result = ValidatedEmbedding::new(data, 100, 64);
assert!(result.is_err(), "Should reject wrong shape");
}
#[test]
fn validated_weight_rejects_all_zeros() {
let data = vec![0.0f32; 100];
let result = ValidatedWeight::new(data, 10, 10, "test");
assert!(result.is_err());
}
#[test]
fn validated_weight_accepts_good_data() {
let data: Vec<f32> = (0..100).map(|i| i as f32 * 0.01).collect();
let result = ValidatedWeight::new(data, 10, 10, "test");
assert!(result.is_ok());
}
#[test]
fn validated_vector_rejects_wrong_length() {
let data = vec![0.1f32; 50];
let result = ValidatedVector::new(data, 100, "test");
assert!(result.is_err());
}
#[test]
fn validated_vector_accepts_good_data() {
let data = vec![1.0f32; 100];
let result = ValidatedVector::new(data, 100, "test");
assert!(result.is_ok());
}
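/// Builds a minimal, structurally valid `AprTransformer` for the tests below:
/// 16-dim hidden state, 4 heads, 4 KV heads, 32-token vocab, and
/// deterministic non-zero weights so every validator check passes by default.
/// Tests then corrupt one tensor at a time to probe a single rule.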
fn make_valid_transformer(num_layers: usize) -> AprTransformer {
use crate::apr_transformer::{AprTransformerConfig, AprTransformerLayer};
let hidden_dim = 16;
let num_heads = 4;
let num_kv_heads = 4;
let vocab_size = 32;
let intermediate_dim = 64;
let head_dim = hidden_dim / num_heads;
let kv_dim = num_kv_heads * head_dim;
let qkv_out_dim = hidden_dim + 2 * kv_dim;
let config = AprTransformerConfig {
architecture: "test".to_string(),
hidden_dim,
num_layers,
num_heads,
num_kv_heads,
vocab_size,
intermediate_dim,
context_length: 128,
rope_theta: 10000.0,
eps: 1e-6,
eos_token_id: None,
..Default::default()
};
let make_data = |n: usize| -> Vec<f32> {
(0..n)
.map(|i| (i as f32 * 0.01).sin() * 0.1 + 0.05)
.collect()
};
let layers = (0..num_layers)
.map(|_| AprTransformerLayer {
attn_norm_weight: vec![1.0; hidden_dim],
attn_norm_bias: None,
qkv_weight: make_data(qkv_out_dim * hidden_dim),
qkv_bias: None,
attn_output_weight: make_data(hidden_dim * hidden_dim),
attn_output_bias: None,
ffn_gate_weight: Some(make_data(intermediate_dim * hidden_dim)),
ffn_gate_bias: None,
ffn_up_weight: make_data(intermediate_dim * hidden_dim),
ffn_up_bias: None,
ffn_down_weight: make_data(hidden_dim * intermediate_dim),
ffn_down_bias: None,
ffn_norm_weight: Some(vec![1.0; hidden_dim]),
ffn_norm_bias: None,
attn_q_norm_weight: None,
attn_k_norm_weight: None,
linear_attn_z_weight: None,
linear_attn_b_weight: None,
linear_attn_a_weight: None,
linear_attn_conv1d_weight: None,
linear_attn_a_log: None,
linear_attn_dt_bias: None,
linear_attn_norm_weight: None,
moe_gate_weight: None,
moe_expert_gate_up: None,
moe_expert_down: None,
moe_shared_gate: None,
moe_shared_up: None,
moe_shared_down: None,
moe_shared_expert_gate_weight: None,
})
.collect();
AprTransformer {
config,
token_embedding: make_data(vocab_size * hidden_dim),
layers,
output_norm_weight: vec![1.0; hidden_dim],
output_norm_bias: None,
lm_head_weight: make_data(vocab_size * hidden_dim),
lm_head_bias: None,
q4k_layers: None,
lm_head_weight_q6k: None,
lm_head_weight_q4k: None,
}
}
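// Whole-model validation: each test corrupts exactly one tensor and checks
// that `ValidatedAprTransformer::validate` both rejects the model and names
// the offending tensor (including its layer index for per-layer weights).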
#[test]
fn falsify_validated_transformer_rejects_nan_embedding() {
let mut t = make_valid_transformer(1);
t.token_embedding[5] = f32::NAN;
let result = ValidatedAprTransformer::validate(t);
assert!(result.is_err(), "Should reject NaN in embedding");
let err = result.unwrap_err();
assert!(err.tensor_name.contains("embedding"), "Error: {err}");
}
#[test]
fn falsify_validated_transformer_rejects_zero_layer_weight() {
let mut t = make_valid_transformer(1);
let len = t.layers[0].qkv_weight.len();
t.layers[0].qkv_weight = vec![0.0; len];
let result = ValidatedAprTransformer::validate(t);
assert!(result.is_err(), "Should reject all-zero qkv_weight");
let err = result.unwrap_err();
assert!(err.tensor_name.contains("qkv_weight"), "Error: {err}");
}
#[test]
fn falsify_validated_transformer_rejects_nan_in_deep_layer() {
let mut t = make_valid_transformer(4);
t.layers[3].ffn_up_weight[0] = f32::NAN;
let result = ValidatedAprTransformer::validate(t);
assert!(result.is_err(), "Should reject NaN in layer 3 ffn_up");
let err = result.unwrap_err();
assert!(
err.tensor_name.contains("layers.3.ffn_up_weight"),
"Error: {err}"
);
}
#[test]
fn falsify_validated_transformer_identifies_tensor_name() {
let mut t = make_valid_transformer(2);
let len = t.lm_head_weight.len();
t.lm_head_weight = vec![0.0; len];
let result = ValidatedAprTransformer::validate(t);
assert!(result.is_err());
let err = result.unwrap_err();
assert_eq!(
err.tensor_name, "lm_head_weight",
"Error should name the tensor: {err}"
);
}
#[test]
fn validated_transformer_accepts_good_model() {
let t = make_valid_transformer(2);
let result = ValidatedAprTransformer::validate(t);
assert!(result.is_ok(), "Good model should pass: {:?}", result.err());
}
#[test]
fn validated_transformer_deref_transparent_access() {
let t = make_valid_transformer(1);
let validated = ValidatedAprTransformer::validate(t).expect("validation should pass");
assert_eq!(validated.config.hidden_dim, 16);
assert_eq!(validated.config.num_layers, 1);
assert_eq!(validated.config.vocab_size, 32);
assert!(!validated.token_embedding.is_empty());
assert_eq!(validated.layers.len(), 1);
assert_eq!(validated.config().hidden_dim, 16);
assert_eq!(validated.transformer().config.num_layers, 1);
let inner = validated.into_inner();
assert_eq!(inner.config.hidden_dim, 16);
}
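// GH-46 density thresholds exercised below: embeddings fail once more than
// 50% of values are zero, general weights once more than 80% are zero, and
// flat (constant) embeddings are rejected outright.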
#[test]
fn test_falsify_gh46_embedding_density_threshold_50pct() {
let vocab = 32_usize;
let hidden = 16_usize;
let total = vocab * hidden;
let zero_count = (total as f64 * 0.51).ceil() as usize;
let mut data = vec![1.0_f32; total];
for v in data.iter_mut().take(zero_count) {
*v = 0.0;
}
let result = validate_embedding("test_embed", &data, vocab, hidden);
assert!(
!result.passed,
"GH-46: >50% zero embedding must be rejected, failures: {:?}",
result.failures
);
}
#[test]
fn test_falsify_gh46_weight_density_threshold_80pct() {
let rows = 32_usize;
let cols = 16_usize;
let total = rows * cols;
let zero_count = (total as f64 * 0.81).ceil() as usize;
let mut data = vec![1.0_f32; total];
for v in data.iter_mut().take(zero_count) {
*v = 0.0;
}
let result = validate_weight("test_weight", &data, rows, cols);
assert!(
!result.passed,
"GH-46: >80% zero weight must be rejected, failures: {:?}",
result.failures
);
}
#[test]
fn test_falsify_gh46_rejects_flat_tensor() {
let vocab = 32_usize;
let hidden = 16_usize;
let data = vec![0.5_f32; vocab * hidden];
let result = validate_embedding("test_flat", &data, vocab, hidden);
assert!(
!result.passed,
"GH-46: Flat (constant) embedding must be rejected, failures: {:?}",
result.failures
);
}
#[test]
fn test_falsify_gh46_single_nan_detected() {
let vocab = 32_usize;
let hidden = 16_usize;
let mut data: Vec<f32> = (0..vocab * hidden)
.map(|i| ((i as f32) * 0.01).sin())
.collect();
data[vocab * hidden / 2] = f32::NAN;
let result = validate_embedding("test_nan", &data, vocab, hidden);
assert!(
!result.passed,
"GH-46: Single NaN must be caught by validation"
);
assert!(
result
.failures
.iter()
.any(|f| f.to_lowercase().contains("nan")),
"GH-46: Failure message must mention NaN"
);
}
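// Architecture-conditional checks: qwen3 requires per-layer QK norm weights,
// qwen2 requires a QKV bias, and llama is valid without either.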
#[test]
fn falsify_gap4_001_qwen3_without_qk_norm_rejected() {
let mut t = make_valid_transformer(2);
t.config.architecture = "qwen3".to_string();
assert!(t.layers[0].attn_q_norm_weight.is_none());
let result = ValidatedAprTransformer::validate(t);
assert!(result.is_err(), "Qwen3 without QK norm must be rejected");
let err = result.unwrap_err();
assert!(
err.message.contains("QK norm") || err.message.contains("attn_q_norm"),
"Error must mention QK norm: {}",
err.message
);
}
#[test]
fn falsify_gap4_002_qwen3_with_qk_norm_passes() {
let mut t = make_valid_transformer(2);
t.config.architecture = "qwen3".to_string();
let head_dim = t.config.hidden_dim / t.config.num_heads;
for layer in &mut t.layers {
layer.attn_q_norm_weight = Some(vec![1.0; head_dim]);
layer.attn_k_norm_weight = Some(vec![1.0; head_dim]);
}
let result = ValidatedAprTransformer::validate(t);
assert!(
result.is_ok(),
"Qwen3 with QK norm should pass: {:?}",
result.err()
);
}
#[test]
fn falsify_gap4_003_qwen2_without_bias_rejected() {
let mut t = make_valid_transformer(1);
t.config.architecture = "qwen2".to_string();
assert!(t.layers[0].qkv_bias.is_none());
let result = ValidatedAprTransformer::validate(t);
assert!(result.is_err(), "Qwen2 without bias must be rejected");
}
#[test]
fn falsify_gap4_004_llama_without_optional_passes() {
let mut t = make_valid_transformer(2);
t.config.architecture = "llama".to_string();
let result = ValidatedAprTransformer::validate(t);
assert!(
result.is_ok(),
"LLaMA should pass without QK norm or bias: {:?}",
result.err()
);
}
#[test]
fn falsify_gap4_005_qwen3_missing_norm_on_later_layer() {
let mut t = make_valid_transformer(4);
t.config.architecture = "qwen3".to_string();
let head_dim = t.config.hidden_dim / t.config.num_heads;
for layer in &mut t.layers {
layer.attn_q_norm_weight = Some(vec![1.0; head_dim]);
layer.attn_k_norm_weight = Some(vec![1.0; head_dim]);
}
t.layers[3].attn_k_norm_weight = None;
let result = ValidatedAprTransformer::validate(t);
assert!(result.is_err(), "Must catch missing QK norm on layer 3");
let err = result.unwrap_err();
assert!(
err.tensor_name.contains("3"),
"Error must identify layer 3: {}",
err.tensor_name
);
}
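// FALSIFY-E6 series: embedding-specific corruption. Non-finite values map to
// F-DATA-QUALITY-002, a near-zero L2 norm to F-DATA-QUALITY-003, and the
// per-token spot check to F-DATA-QUALITY-004; degenerate shapes
// (vocab_size=0, hidden_dim=0, truncated or oversized buffers) must also fail.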
#[test]
fn falsify_e6a_embedding_rejects_inf() {
let vocab_size = 10;
let hidden_dim = 8;
let mut data: Vec<f32> = (0..vocab_size * hidden_dim)
.map(|i| (i as f32 * 0.01).sin() * 0.1)
.collect();
data[7] = f32::INFINITY;
let result = ValidatedEmbedding::new(data, vocab_size, hidden_dim);
assert!(result.is_err(), "FALSIFY-E6a: Should reject Inf in embedding");
assert_eq!(result.unwrap_err().rule_id, "F-DATA-QUALITY-002");
}
#[test]
fn falsify_e6b_embedding_rejects_neg_inf() {
let vocab_size = 10;
let hidden_dim = 8;
let mut data: Vec<f32> = (0..vocab_size * hidden_dim)
.map(|i| (i as f32 * 0.01).sin() * 0.1)
.collect();
data[3] = f32::NEG_INFINITY;
let result = ValidatedEmbedding::new(data, vocab_size, hidden_dim);
assert!(result.is_err(), "FALSIFY-E6b: Should reject -Inf in embedding");
assert_eq!(result.unwrap_err().rule_id, "F-DATA-QUALITY-002");
}
#[test]
fn falsify_e6c_embedding_rejects_near_zero_l2() {
let vocab_size = 10;
let hidden_dim = 8;
let data: Vec<f32> = (0..vocab_size * hidden_dim)
.map(|i| 1e-8 + (i as f32) * 1e-12)
.collect();
let result = ValidatedEmbedding::new(data, vocab_size, hidden_dim);
assert!(result.is_err(), "FALSIFY-E6c: Near-zero L2 embedding must be rejected");
assert_eq!(result.unwrap_err().rule_id, "F-DATA-QUALITY-003");
}
#[test]
fn falsify_e6d_spot_check_catches_trailing_corruption() {
let vocab_size = 100;
let hidden_dim = 64;
let mut data: Vec<f32> = (0..vocab_size * hidden_dim)
.map(|i| (i as f32 * 0.01).sin() * 0.1)
.collect();
let token_90_start = 90 * hidden_dim;
for v in &mut data[token_90_start..token_90_start + hidden_dim] {
*v = 0.0;
}
let result = ValidatedEmbedding::new(data, vocab_size, hidden_dim);
assert!(result.is_err(), "FALSIFY-E6d: Must catch zero token at 90%");
assert_eq!(result.unwrap_err().rule_id, "F-DATA-QUALITY-004");
}
#[test]
fn falsify_e6e_zero_vocab_size_rejected() {
let result = ValidatedEmbedding::new(vec![], 0, 64);
assert!(result.is_err(), "FALSIFY-E6e: vocab_size=0 must be rejected");
}
#[test]
fn falsify_e6f_zero_hidden_dim_rejected() {
let result = ValidatedEmbedding::new(vec![], 100, 0);
assert!(result.is_err(), "FALSIFY-E6f: hidden_dim=0 must be rejected");
}
#[test]
fn falsify_e6g_truncated_embedding_rejected() {
let mut t = make_valid_transformer(1);
let len = t.token_embedding.len();
t.token_embedding.truncate(len - 10);
let result = ValidatedAprTransformer::validate(t);
assert!(result.is_err(), "FALSIFY-E6g: Truncated embedding must be rejected");
let err = result.unwrap_err();
assert!(err.tensor_name.contains("embedding"),
"Error must identify embedding: {}", err);
}
#[test]
fn falsify_e6h_oversized_embedding_rejected() {
let mut t = make_valid_transformer(1);
for _ in 0..10 {
t.token_embedding.push(0.1);
}
let result = ValidatedAprTransformer::validate(t);
assert!(result.is_err(), "FALSIFY-E6h: Oversized embedding must be rejected");
}
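// FALSIFY-L series: lm_head corruption. A wrong shape maps to
// F-LAYOUT-CONTRACT-001; NaN, Inf, all-zero, truncated, and oversized
// lm_head weights must all be rejected, both via `ValidatedWeight` directly
// and through the full `ValidatedAprTransformer` pipeline.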
#[test]
fn falsify_l1a_validated_weight_rejects_wrong_shape_lm_head() {
let data: Vec<f32> = (0..6400)
.map(|i| (i as f32 * 0.01).sin() * 0.1)
.collect();
let result = ValidatedWeight::new(data, 200, 64, "lm_head.weight");
assert!(result.is_err(),
"FALSIFY-L1a: Wrong-shape lm_head must be rejected");
assert_eq!(result.unwrap_err().rule_id, "F-LAYOUT-CONTRACT-001");
}
#[test]
fn falsify_l1b_validated_weight_rejects_nan_lm_head() {
let data = vec![f32::NAN; 100 * 64];
let result = ValidatedWeight::new(data, 100, 64, "lm_head.weight");
assert!(result.is_err(),
"FALSIFY-L1b: All-NaN lm_head must be rejected — produces [PAD] garbage");
}
#[test]
fn falsify_l1c_validated_weight_rejects_inf_lm_head() {
let mut data: Vec<f32> = (0..100 * 64)
.map(|i| (i as f32 * 0.01).sin() * 0.1)
.collect();
data[42] = f32::INFINITY;
let result = ValidatedWeight::new(data, 100, 64, "lm_head.weight");
assert!(result.is_err(),
"FALSIFY-L1c: Inf in lm_head must be rejected");
}
#[test]
fn falsify_l2_validated_transformer_catches_zero_lm_head() {
let mut t = make_valid_transformer(1);
let len = t.lm_head_weight.len();
t.lm_head_weight = vec![0.0; len];
let result = ValidatedAprTransformer::validate(t);
assert!(result.is_err(),
"FALSIFY-L2: All-zero lm_head must be rejected by ValidatedAprTransformer");
let err = result.unwrap_err();
assert_eq!(err.tensor_name, "lm_head_weight",
"Error must identify lm_head_weight: {}", err);
}
#[test]
fn falsify_l3_validated_transformer_catches_truncated_lm_head() {
let mut t = make_valid_transformer(1);
let len = t.lm_head_weight.len();
t.lm_head_weight.truncate(len - 10);
let result = ValidatedAprTransformer::validate(t);
assert!(result.is_err(),
"FALSIFY-L3: Truncated lm_head must be rejected");
let err = result.unwrap_err();
assert!(err.tensor_name.contains("lm_head"),
"Error must identify lm_head: {}", err);
}
#[test]
fn falsify_l4_validated_transformer_catches_oversized_lm_head() {
let mut t = make_valid_transformer(1);
for _ in 0..10 {
t.lm_head_weight.push(0.1);
}
let result = ValidatedAprTransformer::validate(t);
assert!(result.is_err(),
"FALSIFY-L4: Oversized lm_head must be rejected");
}
#[test]
fn falsify_l5_validated_transformer_catches_nan_lm_head() {
let mut t = make_valid_transformer(1);
t.lm_head_weight[10] = f32::NAN;
let result = ValidatedAprTransformer::validate(t);
assert!(result.is_err(),
"FALSIFY-L5: NaN in lm_head must be rejected by full validation pipeline");
}
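// FALSIFY-A series: attention weights. Truncated, all-zero, and NaN QKV or
// attn_output tensors must be rejected, and the fused-QKV shape check must
// accept GQA layouts (qkv_out = hidden + 2 * kv_dim) while rejecting an
// MHA-assumed 3 * hidden shape fed GQA-sized data.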
#[test]
fn falsify_a1r_validated_transformer_catches_truncated_qkv() {
let mut t = make_valid_transformer(1);
let len = t.layers[0].qkv_weight.len();
t.layers[0].qkv_weight.truncate(len - 10);
let result = ValidatedAprTransformer::validate(t);
assert!(result.is_err(),
"FALSIFY-A1r: Truncated QKV weight must be rejected");
let err = result.unwrap_err();
assert!(err.tensor_name.contains("qkv"),
"Error must identify qkv_weight: {}", err);
}
#[test]
fn falsify_a2r_validated_transformer_catches_zero_qkv() {
let mut t = make_valid_transformer(1);
let len = t.layers[0].qkv_weight.len();
t.layers[0].qkv_weight = vec![0.0; len];
let result = ValidatedAprTransformer::validate(t);
assert!(result.is_err(),
"FALSIFY-A2r: All-zero QKV weight must be rejected");
}
#[test]
fn falsify_a3r_validated_transformer_catches_nan_qkv() {
let mut t = make_valid_transformer(1);
t.layers[0].qkv_weight[5] = f32::NAN;
let result = ValidatedAprTransformer::validate(t);
assert!(result.is_err(),
"FALSIFY-A3r: NaN in QKV weight must be rejected");
}
#[test]
fn falsify_a4r_validated_transformer_catches_truncated_attn_output() {
let mut t = make_valid_transformer(1);
let len = t.layers[0].attn_output_weight.len();
t.layers[0].attn_output_weight.truncate(len - 5);
let result = ValidatedAprTransformer::validate(t);
assert!(result.is_err(),
"FALSIFY-A4r: Truncated attn_output must be rejected");
}
#[test]
fn falsify_a5r_validated_transformer_catches_zero_attn_output() {
let mut t = make_valid_transformer(1);
let len = t.layers[0].attn_output_weight.len();
t.layers[0].attn_output_weight = vec![0.0; len];
let result = ValidatedAprTransformer::validate(t);
assert!(result.is_err(),
"FALSIFY-A5r: All-zero attn_output must be rejected — disables attention");
}
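// With hidden=16, num_heads=4, num_kv_heads=2: head_dim=4, kv_dim=8, so the
// fused GQA projection is [hidden + 2*kv_dim, hidden] = [32, 16].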
#[test]
fn falsify_a6r_validated_weight_gqa_fused_qkv_shape() {
let hidden = 16;
let num_heads = 4;
let num_kv_heads = 2;
let head_dim = hidden / num_heads;
let kv_dim = num_kv_heads * head_dim;
let qkv_out = hidden + 2 * kv_dim;
let data: Vec<f32> = (0..qkv_out * hidden)
.map(|i| (i as f32 * 0.01).sin() * 0.1 + 0.05)
.collect();
let result = ValidatedWeight::new(data, qkv_out, hidden, "qkv_weight");
assert!(result.is_ok(),
"FALSIFY-A6r: GQA fused QKV [32, 16] must be valid: {:?}", result.err());
let wrong_qkv_out = 3 * hidden;
let wrong_data: Vec<f32> = (0..qkv_out * hidden)
.map(|i| (i as f32 * 0.01).sin() * 0.1 + 0.05)
.collect();
let result = ValidatedWeight::new(wrong_data, wrong_qkv_out, hidden, "qkv_weight");
assert!(result.is_err(),
"FALSIFY-A6r: MHA-assumed shape with GQA data must fail");
}
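// FALSIFY-F series: FFN projections. Truncated or all-zero gate/down weights
// and NaN in the up projection must be rejected, with the error naming the
// specific ffn_* tensor.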
#[test]
fn falsify_f1r_validated_transformer_catches_truncated_gate() {
let mut t = make_valid_transformer(1);
if let Some(ref mut gate) = t.layers[0].ffn_gate_weight {
let len = gate.len();
gate.truncate(len - 5);
}
let result = ValidatedAprTransformer::validate(t);
assert!(result.is_err(),
"FALSIFY-F1r: Truncated gate_proj must be rejected");
let err = result.unwrap_err();
assert!(err.tensor_name.contains("ffn_gate"),
"Error must identify ffn_gate_weight: {}", err);
}
#[test]
fn falsify_f2r_validated_transformer_catches_zero_gate() {
let mut t = make_valid_transformer(1);
if let Some(ref mut gate) = t.layers[0].ffn_gate_weight {
let len = gate.len();
*gate = vec![0.0; len];
}
let result = ValidatedAprTransformer::validate(t);
assert!(result.is_err(),
"FALSIFY-F2r: All-zero gate_proj must be rejected — disables FFN");
}
#[test]
fn falsify_f3r_validated_transformer_catches_nan_up() {
let mut t = make_valid_transformer(1);
t.layers[0].ffn_up_weight[0] = f32::NAN;
let result = ValidatedAprTransformer::validate(t);
assert!(result.is_err(),
"FALSIFY-F3r: NaN in up_proj must be rejected");
}
#[test]
fn falsify_f4r_validated_transformer_catches_truncated_down() {
let mut t = make_valid_transformer(1);
let len = t.layers[0].ffn_down_weight.len();
t.layers[0].ffn_down_weight.truncate(len - 5);
let result = ValidatedAprTransformer::validate(t);
assert!(result.is_err(),
"FALSIFY-F4r: Truncated down_proj must be rejected");
let err = result.unwrap_err();
assert!(err.tensor_name.contains("ffn_down"),
"Error must identify ffn_down_weight: {}", err);
}
#[test]
fn falsify_f5r_validated_transformer_catches_zero_down() {
let mut t = make_valid_transformer(1);
let len = t.layers[0].ffn_down_weight.len();
t.layers[0].ffn_down_weight = vec![0.0; len];
let result = ValidatedAprTransformer::validate(t);
assert!(result.is_err(),
"FALSIFY-F5r: All-zero down_proj must be rejected — disables FFN");
}
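// FALSIFY-N series: normalization weights. Norm vectors must match
// hidden_dim exactly and be finite; a wrong length or a non-finite entry in
// attn_norm, ffn_norm, or output_norm must fail validation.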
#[test]
fn falsify_n1r_validated_transformer_catches_wrong_attn_norm() {
let mut t = make_valid_transformer(1);
t.layers[0].attn_norm_weight = vec![1.0; 8]; // wrong length: hidden_dim is 16
let result = ValidatedAprTransformer::validate(t);
assert!(result.is_err(),
"FALSIFY-N1r: Wrong-length attn_norm must be rejected");
let err = result.unwrap_err();
assert!(err.tensor_name.contains("attn_norm"),
"Error must identify attn_norm: {}", err);
}
#[test]
fn falsify_n2r_validated_transformer_catches_nan_ffn_norm() {
let mut t = make_valid_transformer(1);
if let Some(ref mut norm) = t.layers[0].ffn_norm_weight {
norm[0] = f32::NAN;
}
let result = ValidatedAprTransformer::validate(t);
assert!(result.is_err(),
"FALSIFY-N2r: NaN in ffn_norm must be rejected");
}
#[test]
fn falsify_n3r_validated_transformer_catches_wrong_output_norm() {
let mut t = make_valid_transformer(1);
t.output_norm_weight = vec![1.0; 8]; // wrong length: hidden_dim is 16
let result = ValidatedAprTransformer::validate(t);
assert!(result.is_err(),
"FALSIFY-N3r: Wrong-length output_norm must be rejected");
let err = result.unwrap_err();
assert!(err.tensor_name.contains("output_norm"),
"Error must identify output_norm: {}", err);
}
#[test]
fn falsify_n4r_validated_transformer_catches_nan_output_norm() {
let mut t = make_valid_transformer(1);
t.output_norm_weight[5] = f32::NAN;
let result = ValidatedAprTransformer::validate(t);
assert!(result.is_err(),
"FALSIFY-N4r: NaN in output_norm must be rejected — produces GH-202 garbage");
}
#[test]
fn falsify_n5r_validated_transformer_catches_inf_attn_norm() {
let mut t = make_valid_transformer(1);
t.layers[0].attn_norm_weight[3] = f32::INFINITY;
let result = ValidatedAprTransformer::validate(t);
assert!(result.is_err(),
"FALSIFY-N5r: Inf in attn_norm must be rejected");
}
}
#[cfg(test)]
#[path = "validation_tests_02.rs"]
mod validation_tests_02;