use aprender::autograd::Tensor;
use aprender::nn::{LayerNorm, Module};
use proptest::prelude::*;
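
// Shared strategy: f32 vectors that are not (nearly) constant, so the variance
// of each generated row is bounded away from zero and normalization is well-posed.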
fn non_constant_vec() -> impl Strategy<Value = Vec<f32>> {
    proptest::collection::vec(-10.0f32..10.0, 4..64usize).prop_filter("non-constant", |d| {
        let min = d.iter().cloned().fold(f32::INFINITY, f32::min);
        let max = d.iter().cloned().fold(f32::NEG_INFINITY, f32::max);
        (max - min) > 0.01
    })
}

proptest! {
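    // Property: centering. The output mean over the normalized dimension is ~0.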
    #[test]
    fn prop_centering(data in non_constant_vec()) {
        let n = data.len();
        let norm = LayerNorm::without_affine(&[n]);
        let x = Tensor::new(&data, &[1, n]);
        let y = norm.forward(&x);
        let y_data = y.data();
        let mean: f32 = y_data.iter().sum::<f32>() / n as f32;
        prop_assert!(
            mean.abs() < 1e-4,
            "LN output mean={mean}, expected ~0"
        );
    }
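
    // Property: standardization. The output variance is ~1, up to the small
    // epsilon typically added inside the square root (hence the loose 0.1 tolerance).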
    #[test]
    fn prop_standardization(data in non_constant_vec()) {
        let n = data.len();
        let norm = LayerNorm::without_affine(&[n]);
        let x = Tensor::new(&data, &[1, n]);
        let y = norm.forward(&x);
        let y_data = y.data();
        let mean: f32 = y_data.iter().sum::<f32>() / n as f32;
        let var: f32 = y_data.iter().map(|v| (v - mean).powi(2)).sum::<f32>() / n as f32;
        prop_assert!(
            (var - 1.0).abs() < 0.1,
            "LN output variance={var}, expected ~1.0"
        );
    }
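
    // Property: outputs are always finite. There is deliberately no non-constant
    // filter here, so constant rows (zero variance) are generated too; a strictly
    // positive denominator must keep every output value finite.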
    #[test]
    fn prop_denominator_positive(
        data in proptest::collection::vec(-100.0f32..100.0, 2..64usize)
    ) {
        let n = data.len();
        let norm = LayerNorm::new(&[n]);
        let x = Tensor::new(&data, &[1, n]);
        let y = norm.forward(&x);
        for (i, &val) in y.data().iter().enumerate() {
            prop_assert!(
                val.is_finite(),
                "output[{i}]={val} is not finite — denominator may be non-positive"
            );
        }
    }
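
    // Property: approximate idempotency. A normalized row is already
    // ~zero-mean/unit-variance, so normalizing it again is a near no-op.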
    #[test]
    fn prop_idempotency(data in non_constant_vec()) {
        let n = data.len();
        let norm = LayerNorm::without_affine(&[n]);
        let x = Tensor::new(&data, &[1, n]);
        let y1 = norm.forward(&x);
        let y2 = norm.forward(&y1);
        let y1_data = y1.data();
        let y2_data = y2.data();
        for i in 0..n {
            let diff = (y1_data[i] - y2_data[i]).abs();
            prop_assert!(
                diff < 1e-3,
                "idempotency: y1[{i}]={} vs y2[{i}]={}, diff={diff}",
                y1_data[i], y2_data[i]
            );
        }
    }
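
    // Property: shift invariance. Adding a constant c to every element moves the
    // mean but not the deviations from it, so the normalized output is unchanged.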
    #[test]
    fn prop_shift_invariance(data in non_constant_vec(), c in -50.0f32..50.0) {
        let n = data.len();
        let norm = LayerNorm::without_affine(&[n]);
        let x = Tensor::new(&data, &[1, n]);
        let shifted: Vec<f32> = data.iter().map(|&v| v + c).collect();
        let x_shifted = Tensor::new(&shifted, &[1, n]);
        let y_orig = norm.forward(&x);
        let y_shifted = norm.forward(&x_shifted);
        let orig_data = y_orig.data();
        let shifted_data = y_shifted.data();
        for i in 0..n {
            let diff = (orig_data[i] - shifted_data[i]).abs();
            prop_assert!(
                diff < 1e-3,
                "shift invariance: y[{i}] diff={diff}"
            );
        }
    }
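
    // Placeholder: SIMD/scalar equivalence is deferred to the trueno layer
    // (per the ignore message), so this stub is intentionally left empty here.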
    #[test]
    #[ignore = "SIMD equivalence — trueno domain"]
    fn prop_simd_equivalence(
        _x in proptest::collection::vec(-100.0f32..100.0, 1..32usize)
    ) {
    }
}