use aprender::autograd::Tensor;
use aprender::nn::{Module, RMSNorm};
use proptest::prelude::*;
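
// Property-based and falsification tests for aprender's RMSNorm: outputs
// stay finite, scale invariance holds without affine weights, and the
// normalized output has RMS ~1.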
proptest! {
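    // Property: every output element is finite for any bounded input row.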
    #[test]
    fn prop_output_is_finite(
        data in proptest::collection::vec(-100.0f32..100.0, 1..64usize)
    ) {
        let n = data.len();
        let norm = RMSNorm::new(&[n]);
        let x = Tensor::new(&data, &[1, n]);
        let y = norm.forward(&x);
        for (i, &val) in y.data().iter().enumerate() {
            prop_assert!(
                val.is_finite(),
                "output[{i}]={val} is not finite"
            );
        }
    }
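
    // RMSNorm(a*x) = sign(a) * RMSNorm(x) when no affine weight is applied:
    // the RMS denominator scales by |a|, cancelling the magnitude. The eps
    // term makes this only approximate, hence the looser small-|a| tolerance.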
    #[test]
    fn prop_scale_invariance(
        data in proptest::collection::vec(-10.0f32..10.0, 2..32usize),
        // Draw alpha directly from the target magnitude ranges; filtering
        // f32::NORMAL down to |alpha| in (0.01, 100) rejects so many samples
        // that proptest can exhaust its global reject budget.
        alpha in prop_oneof![-100.0f32..-0.01f32, 0.01f32..100.0f32]
    ) {
        let n = data.len();
        let norm = RMSNorm::without_affine(&[n]);
        let x = Tensor::new(&data, &[1, n]);
        let scaled: Vec<f32> = data.iter().map(|&v| v * alpha).collect();
        let x_scaled = Tensor::new(&scaled, &[1, n]);
        let y_orig = norm.forward(&x);
        let y_scaled = norm.forward(&x_scaled);
        let sign = alpha.signum();
        let orig_data = y_orig.data();
        let scaled_data = y_scaled.data();
        // The tolerance depends only on alpha, so compute it once outside
        // the loop; small |alpha| amplifies the relative effect of eps.
        let tol = if alpha.abs() < 0.1 { 0.05 } else { 1e-3 };
        for i in 0..n {
            let expected = sign * orig_data[i];
            let diff = (expected - scaled_data[i]).abs();
            prop_assert!(
                diff < tol,
                "scale invariance: sign*y[{i}]={expected} vs y_scaled[{i}]={}, diff={diff}",
                scaled_data[i]
            );
        }
    }
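
    // Finite outputs imply the RMS denominator never reaches zero; the usual
    // guard is an eps term added inside the sqrt.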
    #[test]
    fn prop_rms_denominator_positive(
        data in proptest::collection::vec(-100.0f32..100.0, 1..64usize)
    ) {
        let n = data.len();
        let norm = RMSNorm::new(&[n]);
        let x = Tensor::new(&data, &[1, n]);
        let y = norm.forward(&x);
        for &val in y.data() {
            prop_assert!(
                val.is_finite(),
                "non-finite output implies RMS denominator issue"
            );
        }
    }
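
    // Placeholder only: SIMD-vs-scalar equivalence belongs to the trueno
    // domain, so this property is ignored here rather than tested.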
    #[test]
    #[ignore = "SIMD equivalence: trueno domain"]
    fn prop_simd_matches_scalar_within_ulp(
        _x in proptest::collection::vec(-100.0f32..100.0, 1..32usize)
    ) {
    }
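
    // Without affine weights, y = x / rms(x), so rms(y) should be ~1 for any
    // input that is not vanishingly small (eps drags it slightly below 1).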
    #[test]
    fn prop_normalized_rms_approx_1(
        data in proptest::collection::vec(-10.0f32..10.0, 2..32usize)
            .prop_filter("non-zero", |d| d.iter().any(|v| v.abs() > 0.01))
    ) {
        let n = data.len();
        let norm = RMSNorm::without_affine(&[n]);
        let x = Tensor::new(&data, &[1, n]);
        let y = norm.forward(&x);
        let y_data = y.data();
        let sum_sq: f32 = y_data.iter().map(|v| v * v).sum();
        let rms = (sum_sq / n as f32).sqrt();
        prop_assert!(
            (rms - 1.0).abs() < 0.1,
            "RMS of normalized output = {rms}, expected ~1.0"
        );
    }
}
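
// A minimal scalar sketch of the computation the properties above assume,
// kept here as a readable reference. The eps value below is illustrative
// only; it is an assumption, not necessarily the constant aprender uses.
fn rms_norm_reference(x: &[f32], eps: f32) -> Vec<f32> {
    let mean_sq: f32 = x.iter().map(|v| v * v).sum::<f32>() / x.len() as f32;
    let denom = (mean_sq + eps).sqrt();
    x.iter().map(|v| v / denom).collect()
}

#[test]
fn reference_sketch_has_unit_rms() {
    // Sanity-check the sketch against the unit-RMS property it encodes.
    let x: Vec<f32> = (1..=8).map(|i| i as f32).collect();
    let y = rms_norm_reference(&x, 1e-6);
    let rms = (y.iter().map(|v| v * v).sum::<f32>() / y.len() as f32).sqrt();
    assert!((rms - 1.0).abs() < 1e-4, "reference sketch RMS = {rms}");
}

// Deterministic falsification checks: fixed inputs chosen to expose specific
// failure modes (drifting RMS, zero input, sign flips).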
#[test]
fn falsify_norm_001_rmsnorm_unit_rms() {
    let data: Vec<f32> = (0..128).map(|i| (i as f32 - 64.0) * 0.1).collect();
    let n = data.len();
    let norm = RMSNorm::new(&[n]);
    let x = Tensor::new(&data, &[1, n]);
    let y = norm.forward(&x);
    let sum_sq: f32 = y.data().iter().map(|v| v * v).sum();
    let rms = (sum_sq / n as f32).sqrt();
    assert!(
        (rms - 1.0).abs() < 0.15,
        "FALSIFY-NORM-001: RMS={rms}, expected ~1.0"
    );
}
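
// All-zero input is the division-by-zero edge case: with an eps guard the
// output should be finite zeros rather than NaN.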
#[test]
fn falsify_norm_002_rmsnorm_zero_input() {
    let n = 64;
    let norm = RMSNorm::new(&[n]);
    let x = Tensor::new(&vec![0.0f32; n], &[1, n]);
    let y = norm.forward(&x);
    for &v in y.data() {
        assert!(v.is_finite(), "FALSIFY-NORM-002: NaN/Inf from zero input");
    }
}
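
// Dividing by a positive RMS cannot flip signs on its own; this also pins
// the default affine weight to a positive initialization, since a negative
// learned weight would invert outputs.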
#[test]
fn falsify_norm_003_rmsnorm_sign_preservation() {
    let data = vec![1.0f32, -1.0, 2.0, -2.0, 0.5, -0.5];
    let n = data.len();
    let norm = RMSNorm::new(&[n]);
    let x = Tensor::new(&data, &[1, n]);
    let y = norm.forward(&x);
    for i in 0..n {
        let input_sign = data[i].signum();
        let output_sign = y.data()[i].signum();
        assert_eq!(
            input_sign,
            output_sign,
            "FALSIFY-NORM-003: sign flipped at [{i}]: input={}, output={}",
            data[i],
            y.data()[i]
        );
    }
}