1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
//! FALSIFY contract tests for Conjugate Gradient optimization.
//!
//! Verifies mathematical invariants of the CG optimizer:
//! - Monotone descent on convex objectives
//! - Numerical stability (finite outputs)
//! - Convergence on positive-definite quadratics
// CONTRACT: conjugate-gradient-v1.yaml
// HASH: sha256:d4e5f6a7b8c90123
// Generated by: pv probar --binding
// DO NOT EDIT — regenerate with `pv probar --binding`
use aprender::optim::{CGBetaFormula, ConjugateGradient, Optimizer};
use aprender::primitives::Vector;
use proptest::prelude::*;
proptest! {
#![proptest_config(ProptestConfig::with_cases(256))]
// ──────────────────────────────────────────────────────────
// FALSIFY-OPT-001: Minimized value <= initial value
// Formal: f(x*) <= f(x0) for convex quadratic objectives
// ──────────────────────────────────────────────────────────
/// Obligation: CG never increases the objective on convex functions
#[test]
fn prop_minimized_leq_initial(
    dim in 2usize..5,
    seed in 0u64..10000,
) {
    // Generate initial point and coefficients via a 64-bit LCG
    // (Knuth MMIX constants) so each case is deterministic in `seed`.
    let mut rng = seed;
    let mut x0_data = Vec::with_capacity(dim);
    let mut coeffs = Vec::with_capacity(dim);
    for _ in 0..dim {
        rng = rng.wrapping_mul(6_364_136_223_846_793_005).wrapping_add(1);
        // Top 31 bits -> uniform in [0, 1), scaled to [-10, 10).
        let val = ((rng >> 33) as f32 / (u32::MAX >> 1) as f32) * 20.0 - 10.0;
        x0_data.push(val);
        rng = rng.wrapping_mul(6_364_136_223_846_793_005).wrapping_add(1);
        // Coefficients in [0.1, 5.0) keep the quadratic strictly convex.
        let coeff = ((rng >> 33) as f32 / (u32::MAX >> 1) as f32) * 4.9 + 0.1;
        coeffs.push(coeff);
    }
    let x0 = Vector::from_slice(&x0_data);
    let coeffs_f = coeffs.clone();
    let coeffs_g = coeffs;
    // f(x) = sum(a_i * x_i^2)
    let f = move |x: &Vector<f32>| -> f32 {
        let mut val = 0.0f32;
        for i in 0..x.len() {
            val += coeffs_f[i] * x[i] * x[i];
        }
        val
    };
    // g(x) = [2*a_1*x_1, 2*a_2*x_2, ...]
    let g = move |x: &Vector<f32>| -> Vector<f32> {
        let grad: Vec<f32> = (0..x.len())
            .map(|i| 2.0 * coeffs_g[i] * x[i])
            .collect();
        Vector::from_slice(&grad)
    };
    // Evaluate f at the start point through the same closure the optimizer
    // will use, rather than re-deriving it with a duplicated loop that could
    // silently drift out of sync with `f`. Calling `f` only borrows it, so it
    // can still be moved into `minimize` afterwards.
    let f_initial = f(&x0);
    let mut opt = ConjugateGradient::new(100, 1e-6, CGBetaFormula::FletcherReeves);
    let result = opt.minimize(f, g, x0);
    // Small additive tolerance absorbs f32 rounding in the comparison.
    prop_assert!(
        result.objective_value <= f_initial + 1e-5,
        "FALSIFY-OPT-001: f(x*)={} > f(x0)={}, CG increased objective",
        result.objective_value, f_initial
    );
}
// ──────────────────────────────────────────────────────────
// FALSIFY-OPT-002: Optimal point is finite (no NaN/Inf)
// Formal: forall i, x*_i is finite
// ──────────────────────────────────────────────────────────
/// Obligation: CG produces finite solution vectors
#[test]
fn prop_optimal_point_finite(
    dim in 2usize..5,
    seed in 0u64..10000,
) {
    // Deterministic per-seed randomness: one 64-bit LCG step per draw,
    // top 31 bits mapped to [0, 1).
    let mut state = seed;
    let mut next_unit = || {
        state = state.wrapping_mul(6_364_136_223_846_793_005).wrapping_add(1);
        (state >> 33) as f32 / (u32::MAX >> 1) as f32
    };
    // Start point in [-10, 10)^dim; diagonal entries in [0.1, 5.0).
    let mut start = Vec::with_capacity(dim);
    let mut diag = Vec::with_capacity(dim);
    for _ in 0..dim {
        start.push(next_unit() * 20.0 - 10.0);
        diag.push(next_unit() * 4.9 + 0.1);
    }
    let x0 = Vector::from_slice(&start);
    let diag_f = diag.clone();
    let diag_g = diag;
    // Objective: f(x) = sum(a_i * x_i^2).
    let f = move |x: &Vector<f32>| -> f32 {
        (0..x.len()).map(|i| diag_f[i] * x[i] * x[i]).sum()
    };
    // Gradient: g(x)_i = 2 * a_i * x_i.
    let g = move |x: &Vector<f32>| -> Vector<f32> {
        let mut grad = Vec::with_capacity(x.len());
        for i in 0..x.len() {
            grad.push(2.0 * diag_g[i] * x[i]);
        }
        Vector::from_slice(&grad)
    };
    let mut opt = ConjugateGradient::new(100, 1e-6, CGBetaFormula::PolakRibiere);
    let result = opt.minimize(f, g, x0);
    // All components of the solution must be finite
    for i in 0..result.solution.len() {
        prop_assert!(
            result.solution[i].is_finite(),
            "FALSIFY-OPT-002: solution[{}]={}, expected finite",
            i, result.solution[i]
        );
    }
    // Objective value must also be finite
    prop_assert!(
        result.objective_value.is_finite(),
        "FALSIFY-OPT-002: objective_value={}, expected finite",
        result.objective_value
    );
}
// ──────────────────────────────────────────────────────────
// FALSIFY-OPT-003: Quadratic function converges
// Formal: for f(x) = x^T A x with PD diagonal A,
// CG converges to x* near 0 with f(x*) near 0
// ──────────────────────────────────────────────────────────
/// Obligation: CG converges on positive-definite quadratics
#[test]
fn prop_quadratic_converges(
    dim in 2usize..5,
    seed in 0u64..10000,
) {
    // One LCG advance (Knuth MMIX multiplier) per pseudo-random draw.
    fn lcg_step(s: u64) -> u64 {
        s.wrapping_mul(6_364_136_223_846_793_005).wrapping_add(1)
    }
    // Map the top 31 bits of the state to a float in [0, 1).
    fn to_unit(s: u64) -> f32 {
        (s >> 33) as f32 / (u32::MAX >> 1) as f32
    }
    let mut state = seed;
    let mut point = Vec::with_capacity(dim);
    let mut scale = Vec::with_capacity(dim);
    for _ in 0..dim {
        state = lcg_step(state);
        point.push(to_unit(state) * 20.0 - 10.0);
        // Strictly positive entries in [0.1, 5.0) ensure a PD diagonal matrix.
        state = lcg_step(state);
        scale.push(to_unit(state) * 4.9 + 0.1);
    }
    let x0 = Vector::from_slice(&point);
    let scale_f = scale.clone();
    let scale_g = scale;
    // f(x) = x^T diag(a) x = sum(a_i * x_i^2), minimum at origin.
    let f = move |x: &Vector<f32>| -> f32 {
        let mut acc = 0.0f32;
        for i in 0..x.len() {
            acc += scale_f[i] * x[i] * x[i];
        }
        acc
    };
    // g(x) = 2 * diag(a) * x.
    let g = move |x: &Vector<f32>| -> Vector<f32> {
        let grad: Vec<f32> = (0..x.len())
            .map(|i| 2.0 * scale_g[i] * x[i])
            .collect();
        Vector::from_slice(&grad)
    };
    let mut opt = ConjugateGradient::new(200, 1e-8, CGBetaFormula::FletcherReeves);
    let result = opt.minimize(f, g, x0);
    // On a PD quadratic, CG should converge close to the minimum (origin).
    // Allow generous tolerance since line search may not be exact.
    prop_assert!(
        result.objective_value < 1.0,
        "FALSIFY-OPT-003: f(x*)={}, expected < 1.0 for PD quadratic convergence",
        result.objective_value
    );
    // Each component should be near zero.
    for i in 0..result.solution.len() {
        prop_assert!(
            result.solution[i].abs() < 5.0,
            "FALSIFY-OPT-003: solution[{}]={}, expected |x_i| < 5.0 near origin",
            i, result.solution[i]
        );
    }
}
/// Obligation: SIMD matches scalar within ULP (equivalence)
// Intentionally a stub: this obligation is owned by the `trueno` crate,
// so the test body is empty and the test is permanently ignored here.
// The strategy argument is kept (underscored) so the contract signature
// stays in sync with the generated binding.
#[test]
#[ignore = "SIMD equivalence — trueno domain"]
fn prop_simd_equivalence(
_x in proptest::collection::vec(-10.0f32..10.0, 1..32usize)
) {
// SIMD equivalence testing is trueno's responsibility
}
}