1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
//! FALSIFY contract tests for Graph Neural Network operations.
//!
//! Verifies invariants of GCN aggregation and graph pooling.
// CONTRACT: gnn-v1.yaml
// HASH: sha256:b7e4f8a2c1d09356
// Generated by: pv probar --binding
// DO NOT EDIT -- regenerate with `pv probar --binding`
use aprender::autograd::Tensor;
use aprender::gnn::{global_max_pool, global_mean_pool};
use aprender::nn::gnn::{AdjacencyMatrix, GCNConv};
use proptest::prelude::*;
/// LCG pseudo-random number generator step (Knuth's MMIX multiplier).
///
/// Advances `state` in place and returns a uniform sample in `[0, 1)`.
///
/// Bug fix: the previous version divided the top 31 bits (`state >> 33`)
/// by `u32::MAX` (~2^32), which capped the output near 0.5 and silently
/// halved the intended range. We now take the top 24 bits and divide by
/// 2^24: a 24-bit integer is exactly representable in an f32 (24-bit
/// mantissa), so the quotient is exact and strictly less than 1.0.
fn lcg_next(state: &mut u64) -> f32 {
    *state = state
        .wrapping_mul(6_364_136_223_846_793_005)
        .wrapping_add(1);
    (*state >> 40) as f32 / (1u64 << 24) as f32
}
/// Build a deterministic `[num_nodes, num_features]` node feature matrix
/// by drawing `num_nodes * num_features` samples from the LCG stream and
/// rescaling each one into a signed range centred on zero.
fn make_node_features(rng: &mut u64, num_nodes: usize, num_features: usize) -> Tensor {
    let total = num_nodes * num_features;
    let data: Vec<f32> = (0..total)
        .map(|_| 2.0 * lcg_next(rng) - 1.0)
        .collect();
    Tensor::new(&data, &[num_nodes, num_features])
}
/// Build the adjacency matrix for a directed chain graph
/// `0 -> 1 -> ... -> n-1`, then add self-loops to every node.
///
/// For `num_nodes <= 1` the edge list is empty (only self-loops remain).
fn make_chain_adj(num_nodes: usize) -> AdjacencyMatrix {
    let edges: Vec<[usize; 2]> = (1..num_nodes)
        .map(|dst| [dst - 1, dst])
        .collect();
    AdjacencyMatrix::from_edge_index(&edges, num_nodes).add_self_loops()
}
proptest! {
    #![proptest_config(ProptestConfig::with_cases(256))]

    // ---------------------------------------------------------------
    // FALSIFY-GNN-001: GCN forward preserves node count
    //
    // For any graph with N nodes and input [N, in_features], GCNConv
    // must produce output [N, out_features].
    // ---------------------------------------------------------------
    #[test]
    fn prop_gcn_forward_preserves_node_count(
        num_nodes in 2usize..8,
        in_features in 2usize..16,
        out_features in 2usize..16,
        seed in 0u64..10000,
    ) {
        let mut rng = seed;
        let x = make_node_features(&mut rng, num_nodes, in_features);
        let adj = make_chain_adj(num_nodes);
        let gcn = GCNConv::new(in_features, out_features);
        let out = gcn.forward(&x, &adj);

        // Invariant: output row count == input row count
        prop_assert_eq!(
            out.shape()[0], num_nodes,
            "FALSIFY-GNN-001: output nodes {} != input nodes {}",
            out.shape()[0], num_nodes
        );
        // Invariant: output column count == out_features
        prop_assert_eq!(
            out.shape()[1], out_features,
            "FALSIFY-GNN-001: output features {} != expected {}",
            out.shape()[1], out_features
        );
        // All output values must be finite (no NaN/Inf leaking out of
        // the aggregation/normalization).
        for (i, &v) in out.data().iter().enumerate() {
            prop_assert!(
                v.is_finite(),
                "FALSIFY-GNN-001: output[{}] = {} is not finite",
                i, v
            );
        }
    }

    // ---------------------------------------------------------------
    // FALSIFY-GNN-002: Global mean pool output is finite
    //
    // For any node feature matrix [N, F], global_mean_pool must
    // produce a [1, F] tensor with all finite values, and each
    // pooled value must lie within [min_input, max_input].
    // ---------------------------------------------------------------
    #[test]
    fn prop_global_mean_pool_finite(
        num_nodes in 1usize..16,
        num_features in 1usize..16,
        seed in 0u64..10000,
    ) {
        let mut rng = seed;
        let x = make_node_features(&mut rng, num_nodes, num_features);
        let pooled = global_mean_pool(&x, None);

        // Shape invariant: [1, num_features]
        prop_assert_eq!(
            pooled.shape(),
            &[1, num_features],
            "FALSIFY-GNN-002: pooled shape {:?} != [1, {}]",
            pooled.shape(), num_features
        );

        let x_data = x.data();
        let pooled_data = pooled.data();
        for f in 0..num_features {
            let pooled_val = pooled_data[f];
            // Finiteness
            prop_assert!(
                pooled_val.is_finite(),
                "FALSIFY-GNN-002: mean_pool[{}] = {} is not finite",
                f, pooled_val
            );
            // Mean must lie within [min, max] of input column
            // (features are stored row-major: node n, feature f is at
            // n * num_features + f).
            let col_min = (0..num_nodes)
                .map(|n| x_data[n * num_features + f])
                .fold(f32::INFINITY, f32::min);
            let col_max = (0..num_nodes)
                .map(|n| x_data[n * num_features + f])
                .fold(f32::NEG_INFINITY, f32::max);
            prop_assert!(
                pooled_val >= col_min - 1e-6 && pooled_val <= col_max + 1e-6,
                "FALSIFY-GNN-002: mean_pool[{}] = {} not in [{}, {}]",
                f, pooled_val, col_min, col_max
            );
        }
    }

    // ---------------------------------------------------------------
    // FALSIFY-GNN-003: Global max pool bounded by input
    //
    // For any node feature matrix [N, F], global_max_pool must
    // produce a [1, F] tensor where each value equals the column
    // maximum of the input.
    // ---------------------------------------------------------------
    #[test]
    fn prop_global_max_pool_bounded(
        num_nodes in 1usize..16,
        num_features in 1usize..16,
        seed in 0u64..10000,
    ) {
        let mut rng = seed;
        let x = make_node_features(&mut rng, num_nodes, num_features);
        let pooled = global_max_pool(&x, None);

        // Shape invariant: [1, num_features]
        prop_assert_eq!(
            pooled.shape(),
            &[1, num_features],
            "FALSIFY-GNN-003: pooled shape {:?} != [1, {}]",
            pooled.shape(), num_features
        );

        let x_data = x.data();
        let pooled_data = pooled.data();
        // Loop-invariant hoist: the global maximum over all entries is the
        // same for every feature column, so compute it once here instead of
        // re-reducing the whole matrix inside the per-column loop.
        let global_max = x_data.iter().copied().fold(f32::NEG_INFINITY, f32::max);
        for f in 0..num_features {
            let pooled_val = pooled_data[f];
            // Finiteness
            prop_assert!(
                pooled_val.is_finite(),
                "FALSIFY-GNN-003: max_pool[{}] = {} is not finite",
                f, pooled_val
            );
            // Max pooled value must equal column max
            let col_max = (0..num_nodes)
                .map(|n| x_data[n * num_features + f])
                .fold(f32::NEG_INFINITY, f32::max);
            prop_assert!(
                (pooled_val - col_max).abs() < 1e-6,
                "FALSIFY-GNN-003: max_pool[{}] = {} != col_max {}",
                f, pooled_val, col_max
            );
            // Redundant bound check: pooled <= max input value globally
            prop_assert!(
                pooled_val <= global_max + 1e-6,
                "FALSIFY-GNN-003: max_pool[{}] = {} exceeds global max {}",
                f, pooled_val, global_max
            );
        }
    }
}