// CONTRACT: hybrid-layer-dispatch-v1.yaml
// HASH: sha256:d4e5f67890123456
// Generated by: pv probar --binding
// DO NOT EDIT — regenerate with `pv probar --binding`
use proptest::prelude::*;
/// Layer type in hybrid architecture.
#[derive(Debug, Clone, Copy, PartialEq)]
enum LayerType {
    Attention,
    Linear,
}
/// Dispatch: pure function of layer index.
fn dispatch(layer_types: &[LayerType], index: usize) -> LayerType {
    layer_types[index]
}
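
// Sketch (not part of the generated contract): a fixed-pattern sanity check
// of `dispatch`, illustrating that it is a pure lookup into the layer map.
#[test]
fn dispatch_example() {
    let pattern = [LayerType::Attention, LayerType::Linear, LayerType::Attention];
    assert_eq!(dispatch(&pattern, 0), LayerType::Attention);
    assert_eq!(dispatch(&pattern, 1), LayerType::Linear);
    assert_eq!(dispatch(&pattern, 2), LayerType::Attention);
}
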
/// Generate random layer type arrays.
fn layer_types_strategy(max_layers: usize) -> impl Strategy<Value = Vec<LayerType>> {
    proptest::collection::vec(
        prop::bool::ANY.prop_map(|b| {
            if b {
                LayerType::Attention
            } else {
                LayerType::Linear
            }
        }),
        2..max_layers,
    )
}
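
// An equivalent strategy (a sketch, not generated code) can be written with
// the prelude's prop_oneof!, which makes the uniform two-way choice explicit:
//     prop_oneof![Just(LayerType::Attention), Just(LayerType::Linear)]
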
/// Causal conv1d output length with left padding.
fn causal_conv1d_output_len(input_len: usize, _kernel_size: usize) -> usize {
    // Causal: pad left by (kernel_size - 1), no right padding
    // output_len = input_len + padding - kernel_size + 1
    //            = input_len + (kernel_size - 1) - kernel_size + 1
    //            = input_len
    input_len
}
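
/// General stride-1, no-dilation conv1d output length (a sketch added for
/// cross-checking the causal case; assumes input_len + total_padding >=
/// kernel_size so the usize subtraction cannot underflow):
/// output_len = input_len + total_padding - kernel_size + 1
fn conv1d_output_len(input_len: usize, kernel_size: usize, total_padding: usize) -> usize {
    input_len + total_padding - kernel_size + 1
}
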
proptest! {
    /// Obligation: Exhaustive partition (invariant)
    /// Formal: len(layer_types) == L, each entry in {attention, linear}
    #[test]
    fn prop_exhaustive_partition(
        layer_types in layer_types_strategy(64)
    ) {
        let num_layers = layer_types.len();
        // Every index has a valid type
        for i in 0..num_layers {
            let lt = dispatch(&layer_types, i);
            prop_assert!(
                lt == LayerType::Attention || lt == LayerType::Linear,
                "layer {} has invalid type", i
            );
        }
        // Count should partition all layers
        let attn_count = layer_types.iter().filter(|&&t| t == LayerType::Attention).count();
        let linear_count = layer_types.iter().filter(|&&t| t == LayerType::Linear).count();
        prop_assert_eq!(
            attn_count + linear_count, num_layers,
            "partition not exhaustive: {} + {} != {}",
            attn_count, linear_count, num_layers
        );
    }

    /// Obligation: Matrix associativity (invariant)
    /// Formal: (A @ B) @ C ≈ A @ (B @ C) within numerical tolerance
    ///
    /// Simplified: verify (a*b)*c ≈ a*(b*c) for scalar triples
    /// as a proxy for matrix associativity.
    #[test]
    fn prop_matrix_associativity(
        a in -5.0f64..5.0,
        b in -5.0f64..5.0,
        c in -5.0f64..5.0
    ) {
        let left = (a * b) * c;
        let right = a * (b * c);
        let diff = (left - right).abs();
        let tolerance = 1e-10 * (a.abs() + b.abs() + c.abs() + 1.0);
        prop_assert!(
            diff < tolerance,
            "associativity: ({} * {}) * {} = {} != {} * ({} * {}) = {}, diff={}",
            a, b, c, left, a, b, c, right, diff
        );
    }

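    /// Sketch (not a generated obligation): the same associativity check on
    /// literal 2x2 matrices with an inline row-major multiply, complementing
    /// the scalar proxy above.
    #[test]
    fn prop_matrix_associativity_2x2(
        m in proptest::collection::vec(-2.0f64..2.0, 12usize)
    ) {
        // Row-major 2x2 multiply: [a0 a1; a2 a3] * [b0 b1; b2 b3].
        let mul = |a: &[f64], b: &[f64]| -> Vec<f64> {
            vec![
                a[0] * b[0] + a[1] * b[2], a[0] * b[1] + a[1] * b[3],
                a[2] * b[0] + a[3] * b[2], a[2] * b[1] + a[3] * b[3],
            ]
        };
        let (a, b, c) = (&m[0..4], &m[4..8], &m[8..12]);
        let left = mul(&mul(a, b), c);
        let right = mul(a, &mul(b, c));
        for i in 0..4 {
            prop_assert!(
                (left[i] - right[i]).abs() < 1e-9,
                "2x2 associativity mismatch at {}: {} vs {}", i, left[i], right[i]
            );
        }
    }
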
    /// Obligation: Head grouping exact (invariant)
    /// Formal: n_v % n_k == 0 for valid configs
    #[test]
    fn prop_head_grouping(
        n_k in 1u32..32,
        group_factor in 1u32..8
    ) {
        let n_v = n_k * group_factor;
        prop_assert_eq!(
            n_v % n_k, 0,
            "n_v={} not divisible by n_k={}", n_v, n_k
        );
    }

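    /// Sketch (not a generated obligation): under the common grouped-query
    /// mapping q -> q / group_factor (an assumption here, not taken from the
    /// contract), every KV head serves exactly group_factor query heads.
    #[test]
    fn prop_head_grouping_uniform(
        n_k in 1u32..32,
        group_factor in 1u32..8
    ) {
        let n_v = n_k * group_factor;
        let mut counts = vec![0u32; n_k as usize];
        for q in 0..n_v {
            counts[(q / group_factor) as usize] += 1;
        }
        prop_assert!(counts.iter().all(|&c| c == group_factor));
    }
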
    /// Obligation: Residual shape preservation (invariant)
    /// Formal: O_proj output dim == hidden_dim
    #[test]
    fn prop_residual_shape_preservation(
        n_heads in 1usize..32,
        d_k in 4usize..128
    ) {
        // Q output: [n_heads * d_k]
        // After attention: [n_heads * d_k]
        // O projection: [n_heads * d_k] -> [hidden_dim]
        // Residual: input[hidden_dim] + output[hidden_dim] -> [hidden_dim]
        let hidden_dim = n_heads * d_k;
        let o_proj_out = hidden_dim;
        let input_dim = hidden_dim;
        // Shape preserved
        prop_assert_eq!(
            o_proj_out, input_dim,
            "residual shape mismatch: o_proj_out={} != input={}",
            o_proj_out, input_dim
        );
        // Verify Q output matches O projection input
        let q_out = n_heads * d_k;
        prop_assert_eq!(
            q_out, hidden_dim,
            "Q output {} != hidden {}", q_out, hidden_dim
        );
    }

    /// Obligation: Conv1d causal output length (invariant)
    /// Formal: output_len == input_len with padding = kernel_size - 1
    #[test]
    fn prop_conv1d_causal_length(
        input_len in 1usize..1024,
        kernel_size in 1usize..16
    ) {
        let output_len = causal_conv1d_output_len(input_len, kernel_size);
        prop_assert_eq!(
            output_len, input_len,
            "causal conv1d: output_len={} != input_len={} for kernel={}",
            output_len, input_len, kernel_size
        );
    }

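    /// Sketch (not a generated obligation): cross-check the causal length
    /// against the general stride-1 formula with total_padding = kernel_size - 1.
    #[test]
    fn prop_conv1d_causal_matches_general(
        input_len in 1usize..1024,
        kernel_size in 1usize..16
    ) {
        let general = conv1d_output_len(input_len, kernel_size, kernel_size - 1);
        prop_assert_eq!(
            causal_conv1d_output_len(input_len, kernel_size), general,
            "causal length disagrees with general formula"
        );
    }
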
    /// Obligation: SIMD linear attention equivalence (equivalence)
    #[test]
    #[ignore = "SIMD equivalence — trueno domain"]
    fn prop_simd_equivalence(
        _x in proptest::collection::vec(-10.0f32..10.0, 1..32usize)
    ) {
        // SIMD linear attention testing is trueno's responsibility
    }
}