use aprender::autograd::Tensor;
use aprender::nn::functional::{gelu, relu};
use proptest::prelude::*;

proptest! {
    // GELU(0) = 0 exactly; the dummy strategy exists only because
    // proptest! requires at least one input.
    #[test]
    fn prop_gelu_at_zero(_dummy in 0..1i32) {
        let x = Tensor::new(&[0.0f32], &[1, 1]);
        let y = gelu(&x);
        let val = y.data()[0];
        prop_assert!(val.abs() < 1e-6, "GELU(0)={val}, expected 0");
    }

    // The f32 output must stay within 1e-3 of an f64 evaluation of the
    // tanh approximation: 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3))).
    #[test]
    fn prop_gelu_approximation_error(x_val in -10.0f32..10.0) {
        let x = Tensor::new(&[x_val], &[1, 1]);
        let y = gelu(&x);
        let val = y.data()[0];
        let xf = f64::from(x_val);
        let inner = std::f64::consts::FRAC_2_PI.sqrt() * (xf + 0.044715 * xf.powi(3));
        let reference = 0.5 * xf * (1.0 + inner.tanh());
        let diff = (f64::from(val) - reference).abs();
        prop_assert!(
            diff < 1e-3,
            "GELU({x_val})={val}, reference={reference}, diff={diff}"
        );
    }
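
    // Added check (a sketch reusing the gelu/Tensor API above): for x >= 0,
    // GELU(x) = x * Phi(x) with Phi(x) in [0.5, 1), so the output must lie
    // in [0, x]. A small slack absorbs f32 rounding.
    #[test]
    fn prop_gelu_bounded_on_nonnegative(x_val in 0.0f32..20.0) {
        let x = Tensor::new(&[x_val], &[1, 1]);
        let val = gelu(&x).data()[0];
        prop_assert!(
            val >= -1e-6 && val <= x_val + 1e-3,
            "GELU({x_val})={val}, expected within [0, {x_val}]"
        );
    }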

    // SiLU(0) = 0 * sigmoid(0) = 0 exactly.
    #[test]
    fn prop_silu_at_zero(_dummy in 0..1i32) {
        let val = aprender::nn::functional::silu_scalar(0.0);
        prop_assert!(val.abs() < 1e-7, "SiLU(0)={val}, expected 0.0");
    }
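
    // Added check (a sketch; assumes silu_scalar takes and returns f32,
    // matching the f32 tensors used elsewhere): SiLU(x) = x * sigmoid(x)
    // has a global minimum of roughly -0.2785 near x = -1.278, so no
    // output should fall below -0.279.
    #[test]
    fn prop_silu_lower_bound(x_val in -50.0f32..50.0) {
        let val = aprender::nn::functional::silu_scalar(x_val);
        prop_assert!(
            val >= -0.279,
            "SiLU({x_val})={val}, below the global minimum of ~-0.2785"
        );
    }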

    // ReLU is monotonically non-decreasing: a >= b implies relu(a) >= relu(b).
    #[test]
    fn prop_relu_monotonic(a in -100.0f32..100.0, b in -100.0f32..100.0) {
        let x_a = Tensor::new(&[a], &[1, 1]);
        let x_b = Tensor::new(&[b], &[1, 1]);
        let y_a = relu(&x_a).data()[0];
        let y_b = relu(&x_b).data()[0];
        if a >= b {
            prop_assert!(
                y_a >= y_b,
                "ReLU not monotonic: a={a} >= b={b} but relu(a)={y_a} < relu(b)={y_b}"
            );
        }
    }
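
    // Added check (a sketch reusing the relu/Tensor API above): ReLU is
    // idempotent, so relu(relu(x)) must equal relu(x) elementwise.
    #[test]
    fn prop_relu_idempotent(
        data in proptest::collection::vec(-100.0f32..100.0, 1..64usize)
    ) {
        let n = data.len();
        let x = Tensor::new(&data, &[1, n]);
        let once = relu(&x);
        let twice = relu(&once);
        for (i, (&a, &b)) in once.data().iter().zip(twice.data().iter()).enumerate() {
            prop_assert!(
                (a - b).abs() < f32::EPSILON,
                "ReLU not idempotent at [{i}]: relu(x)={a}, relu(relu(x))={b}"
            );
        }
    }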

    // Every element of the output must be non-negative, regardless of input.
    #[test]
    fn prop_relu_non_negative(
        data in proptest::collection::vec(-1000.0f32..1000.0, 1..128usize)
    ) {
        let n = data.len();
        let x = Tensor::new(&data, &[1, n]);
        let y = relu(&x);
        for (i, &val) in y.data().iter().enumerate() {
            prop_assert!(val >= 0.0, "ReLU output[{i}]={val}, expected >= 0");
        }
    }
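
    // Added check (a sketch reusing the relu/Tensor API above): the tensor
    // relu should agree exactly with the scalar definition max(0, x).
    #[test]
    fn prop_relu_matches_scalar_definition(
        data in proptest::collection::vec(-1000.0f32..1000.0, 1..128usize)
    ) {
        let n = data.len();
        let x = Tensor::new(&data, &[1, n]);
        let y = relu(&x);
        for (i, (&out, &inp)) in y.data().iter().zip(data.iter()).enumerate() {
            prop_assert!(
                (out - inp.max(0.0)).abs() < f32::EPSILON,
                "ReLU output[{i}]={out}, expected {}",
                inp.max(0.0)
            );
        }
    }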

    // Placeholder: SIMD-vs-scalar equivalence is deferred to the trueno
    // crate, so this body is intentionally empty.
    #[test]
    #[ignore = "SIMD equivalence — trueno domain"]
    fn prop_simd_matches_scalar(
        _x in proptest::collection::vec(-100.0f32..100.0, 1..32usize)
    ) {
    }
}

#[test]
fn relu_zero_gives_zero() {
    let x = Tensor::new(&[0.0f32], &[1, 1]);
    let y = relu(&x);
    assert!((y.data()[0]).abs() < f32::EPSILON);
}

#[test]
fn relu_positive_passthrough() {
    let x = Tensor::new(&[5.0f32], &[1, 1]);
    let y = relu(&x);
    assert!((y.data()[0] - 5.0).abs() < f32::EPSILON);
}

#[test]
fn relu_negative_clamps_to_zero() {
    let x = Tensor::new(&[-5.0f32], &[1, 1]);
    let y = relu(&x);
    assert!((y.data()[0]).abs() < f32::EPSILON);
}

// GELU(-10) = -10 * Phi(-10), and Phi(-10) is vanishingly small, so the
// output should be indistinguishable from zero at f32 precision.
#[test]
fn gelu_negative_approaches_zero() {
    let x = Tensor::new(&[-10.0f32], &[1, 1]);
    let y = gelu(&x);
    assert!(y.data()[0].abs() < 0.01);
}
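
// Added check (a sketch mirroring gelu_negative_approaches_zero): for large
// positive inputs GELU(x) = x * Phi(x) approaches x, since Phi(10) differs
// from 1 by less than 1e-23.
#[test]
fn gelu_large_positive_passthrough() {
    let x = Tensor::new(&[10.0f32], &[1, 1]);
    let y = gelu(&x);
    assert!((y.data()[0] - 10.0).abs() < 0.01);
}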