use super::*;
#[test]
fn falsify_ap_001_shape_preservation() {
    // AP-001: forward() must preserve the input's shape for a variety of
    // (batch, seq_len, d_model) combinations, including the degenerate
    // single-token case (1, 1, 16).
    let cases: [(usize, usize, usize); 4] = [(1, 5, 32), (2, 10, 64), (1, 1, 16), (4, 50, 128)];
    for &(batch, seq_len, d_model) in &cases {
        // Dropout disabled + eval mode so the forward pass is deterministic.
        let mut encoder = PositionalEncoding::new(d_model, 100).with_dropout(0.0);
        encoder.eval();
        let input = Tensor::ones(&[batch, seq_len, d_model]);
        let output = encoder.forward(&input);
        assert_eq!(
            output.shape(),
            input.shape(),
            "FALSIFIED AP-001: output shape {:?} != input shape {:?} for (b={batch},s={seq_len},d={d_model})",
            output.shape(),
            input.shape()
        );
    }
}
#[test]
fn falsify_ap_002_additive_property() {
    // AP-002: the positional encoding must be a purely additive,
    // input-independent term: forward(x) - x is the same for any x,
    // and that term must be nonzero.
    let d_model = 32;
    let mut pe = PositionalEncoding::new(d_model, 100).with_dropout(0.0);
    pe.eval();

    // Two structurally different inputs over the same shape.
    let x1 = Tensor::ones(&[1, 10, d_model]);
    let varied: Vec<f32> = (0..10 * d_model).map(|i| (i as f32 * 0.07).sin()).collect();
    let x2 = Tensor::new(&varied, &[1, 10, d_model]);

    let y1 = pe.forward(&x1);
    let y2 = pe.forward(&x2);

    // Recover the additive contribution PE(x) = forward(x) - x elementwise.
    let contribution = |y: &Tensor, x: &Tensor| -> Vec<f32> {
        y.data()
            .iter()
            .zip(x.data().iter())
            .map(|(&out, &inp)| out - inp)
            .collect()
    };
    let pe_from_x1 = contribution(&y1, &x1);
    let pe_from_x2 = contribution(&y2, &x2);

    // The two recovered contributions must agree elementwise.
    for (i, (&a, &b)) in pe_from_x1.iter().zip(pe_from_x2.iter()).enumerate() {
        assert!(
            (a - b).abs() < 1e-6,
            "FALSIFIED AP-002: PE contribution differs at index {i}: {a} vs {b} \
(PE must be additive and input-independent)"
        );
    }

    // And the contribution must not be trivially zero (squared-L2 check).
    let pe_norm = pe_from_x1.iter().fold(0.0_f32, |acc, v| acc + v * v);
    assert!(
        pe_norm > 1e-6,
        "FALSIFIED AP-002: PE contribution is all-zero (norm={pe_norm})"
    );
}
#[test]
fn falsify_ap_003_finite_output() {
    // AP-003: every output element must be finite (no NaN/Inf) for a
    // well-behaved bounded input of shape [2, 100, 64].
    let mut pe = PositionalEncoding::new(64, 200).with_dropout(0.0);
    pe.eval();
    let data: Vec<f32> = (0..2 * 100 * 64).map(|i| (i as f32 * 0.01).sin()).collect();
    let x = Tensor::new(&data, &[2, 100, 64]);
    let y = pe.forward(&x);
    for (i, &val) in y.data().iter().enumerate() {
        assert!(
            val.is_finite(),
            "FALSIFIED AP-003: output[{i}] = {val} (not finite)"
        );
    }
}
#[test]
fn falsify_ap_004_position_dependent() {
    // AP-004: with a constant (all-ones) input, distinct sequence positions
    // must receive distinct encodings — otherwise the encoding carries no
    // positional information.
    let mut pe = PositionalEncoding::new(32, 100).with_dropout(0.0);
    pe.eval();
    let x_seq = Tensor::ones(&[1, 51, 32]);
    let y_seq = pe.forward(&x_seq);
    let d = 32;
    let out = y_seq.data();

    // Compare every unordered pair drawn from a spread of positions.
    let positions = [0_usize, 1, 5, 50];
    for (idx, &pos_a) in positions.iter().enumerate() {
        for &pos_b in &positions[idx + 1..] {
            // L1 distance between the two position vectors (batch == 1,
            // so the flat offset of position p is p * d).
            let diff: f32 = (0..d)
                .map(|k| (out[pos_a * d + k] - out[pos_b * d + k]).abs())
                .sum();
            assert!(
                diff > 1e-4,
                "FALSIFIED AP-004: positions {pos_a} and {pos_b} have identical output (diff={diff})"
            );
        }
    }
}
#[test]
fn falsify_ap_005_deterministic() {
    // AP-005: in eval mode with dropout disabled, two forward passes over
    // the identical input must produce bit-identical outputs.
    let mut pe = PositionalEncoding::new(32, 50).with_dropout(0.0);
    pe.eval();
    let input_data: Vec<f32> = (0..10 * 32).map(|i| (i as f32 * 0.03).cos()).collect();
    let x = Tensor::new(&input_data, &[1, 10, 32]);
    let first = pe.forward(&x);
    let second = pe.forward(&x);
    assert_eq!(
        first.data(),
        second.data(),
        "FALSIFIED AP-005: two forward passes with identical input differ"
    );
}
// Property-based variants of AP-001/AP-002/AP-003 driven by `proptest`.
// Code is left untouched (doc-only pass): `proptest!` macro bodies mix
// strategy syntax and inner attributes, so a restyle is deliberately avoided.
mod proptest_falsify {
use super::*;
use proptest::prelude::*;
proptest! {
// AP-001 (property form): shape preservation across randomly drawn
// seq_len in [1, 64) and d_model drawn from a fixed set of even sizes.
#![proptest_config(ProptestConfig::with_cases(100))]
#[test]
fn falsify_ap_001_prop_shape(
seq_len in 1_usize..64,
d_model in prop::sample::select(vec![16_usize, 32, 48, 64, 128]),
) {
// Dropout off + eval mode keeps the forward pass deterministic per case.
let mut pe = PositionalEncoding::new(d_model, 200).with_dropout(0.0);
pe.eval();
let x = Tensor::ones(&[1, seq_len, d_model]);
let y = pe.forward(&x);
prop_assert_eq!(
y.shape(), x.shape(),
"FALSIFIED AP-001-prop: seq_len={}, d_model={}, output={:?}",
seq_len, d_model, y.shape()
);
}
}
proptest! {
// AP-002 (property form): the additive contribution forward(x) - x must be
// identical for two different inputs of the same shape (looser 1e-5
// tolerance than the unit test, since shapes vary per case).
#![proptest_config(ProptestConfig::with_cases(50))]
#[test]
fn falsify_ap_002_prop_additive(
d_model in prop::sample::select(vec![16_usize, 32, 64]),
seq_len in 1_usize..20,
) {
let mut pe = PositionalEncoding::new(d_model, 100).with_dropout(0.0);
pe.eval();
// x1 is constant; x2 is a deterministic varied signal over the same shape.
let x1 = Tensor::ones(&[1, seq_len, d_model]);
let x2_data: Vec<f32> = (0..seq_len * d_model)
.map(|i| (i as f32 * 0.13).sin())
.collect();
let x2 = Tensor::new(&x2_data, &[1, seq_len, d_model]);
let y1 = pe.forward(&x1);
let y2 = pe.forward(&x2);
for i in 0..seq_len * d_model {
// Recover the per-element PE contribution from each input.
let pe1 = y1.data()[i] - x1.data()[i];
let pe2 = y2.data()[i] - x2.data()[i];
prop_assert!(
(pe1 - pe2).abs() < 1e-5,
"FALSIFIED AP-002-prop: PE[{i}] differs: {pe1} vs {pe2} (d={d_model}, s={seq_len})"
);
}
}
}
proptest! {
// AP-003 (property form): outputs stay finite for bounded inputs
// (scaled to +/-10) across random shapes.
#![proptest_config(ProptestConfig::with_cases(100))]
#[test]
fn falsify_ap_003_prop_finite(
d_model in prop::sample::select(vec![16_usize, 32, 64]),
seq_len in 1_usize..30,
) {
let mut pe = PositionalEncoding::new(d_model, 200).with_dropout(0.0);
pe.eval();
let x_data: Vec<f32> = (0..seq_len * d_model)
.map(|i| (i as f32 * 0.07).cos() * 10.0)
.collect();
let x = Tensor::new(&x_data, &[1, seq_len, d_model]);
let y = pe.forward(&x);
for (i, &v) in y.data().iter().enumerate() {
prop_assert!(
v.is_finite(),
"FALSIFIED AP-003-prop: output[{i}]={v} (d={d_model}, s={seq_len})"
);
}
}
}
}