use aprender::autograd::Tensor;
use aprender::nn::RotaryPositionEmbedding;
use proptest::prelude::*;
proptest! {
#[test]
fn prop_norm_preservation(
    data in proptest::collection::vec(-10.0f32..10.0, 4..5usize),
    pos in 0usize..32
) {
    // Rotations are orthogonal maps, so applying RoPE at any position
    // must leave the Euclidean norm of the head vector unchanged.
    let head_dim = 4;
    let rope = RotaryPositionEmbedding::new(head_dim, 64);
    let input = Tensor::new(&data[..head_dim], &[1, 1, 1, head_dim]);
    let rotated = rope.apply(&input, &[pos]);
    // L2 norm of a tensor's flat data.
    let l2 = |t: &Tensor| -> f32 { t.data().iter().map(|v| v * v).sum::<f32>().sqrt() };
    let x_norm = l2(&input);
    let y_norm = l2(&rotated);
    let diff = (x_norm - y_norm).abs();
    prop_assert!(
        diff < 1e-4,
        "norm not preserved: |{x_norm} - {y_norm}| = {diff}"
    );
}
#[test]
fn prop_relative_position_encoding(
    q_data in proptest::collection::vec(-5.0f32..5.0, 4..5usize),
    k_data in proptest::collection::vec(-5.0f32..5.0, 4..5usize),
    m in 0usize..16,
    n in 0usize..16
) {
    // The defining RoPE property: the inner product between a query
    // rotated to position m and a key rotated to position n depends
    // only on the offset n - m, not on the absolute positions.
    let head_dim = 4;
    let max_seq = 64;
    let rope = RotaryPositionEmbedding::new(head_dim, max_seq);
    let q = Tensor::new(&q_data[..head_dim], &[1, 1, 1, head_dim]);
    let k = Tensor::new(&k_data[..head_dim], &[1, 1, 1, head_dim]);
    // Flat dot product over the two tensors' data (same summation
    // order as a straight zip/map/sum, so f32 results match exactly).
    let inner = |a: &Tensor, b: &Tensor| -> f32 {
        a.data()
            .iter()
            .zip(b.data().iter())
            .map(|(x, y)| x * y)
            .sum()
    };
    let dot1 = inner(&rope.apply(&q, &[m]), &rope.apply(&k, &[n]));
    // Slide both positions by min(m, n) so the smaller one lands on
    // zero; the offset n - m is preserved, so the dot must match.
    let (pos_a, pos_b) = if n >= m { (0, n - m) } else { (m - n, 0) };
    let dot2 = inner(&rope.apply(&q, &[pos_a]), &rope.apply(&k, &[pos_b]));
    let diff = (dot1 - dot2).abs();
    prop_assert!(
        diff < 1e-3,
        "relative position: dot1={dot1}, dot2={dot2}, diff={diff}"
    );
}
#[test]
#[ignore = "SIMD equivalence — trueno domain"]
fn prop_simd_matches_scalar(
    _x in proptest::collection::vec(-5.0f32..5.0, 1..32usize)
) {
    // Intentionally empty placeholder: per the #[ignore] message above,
    // SIMD-vs-scalar equivalence belongs to the `trueno` domain and is
    // expected to be covered there — TODO confirm before removing.
}
#[test]
fn prop_output_bounded_by_input_norm(
    data in proptest::collection::vec(-10.0f32..10.0, 4..5usize),
    pos in 0usize..32
) {
    // Weaker corollary of norm preservation: a rotation can never
    // amplify a vector, so the rotated norm is bounded by the input's.
    let head_dim = 4;
    let rope = RotaryPositionEmbedding::new(head_dim, 64);
    let x = Tensor::new(&data[..head_dim], &[1, 1, 1, head_dim]);
    let y = rope.apply(&x, &[pos]);
    // Sum of squares over the tensor's flat data.
    let sq_len = |t: &Tensor| t.data().iter().map(|v| v * v).sum::<f32>();
    let x_norm = sq_len(&x).sqrt();
    let y_norm = sq_len(&y).sqrt();
    prop_assert!(
        y_norm <= x_norm + 1e-4,
        "output norm {y_norm} exceeds input norm {x_norm}"
    );
}
}
#[test]
fn rope_position_zero_is_identity() {
    // At position 0 every rotation angle is zero, so RoPE must act as
    // the identity map: the output equals the input element-wise.
    let head_dim = 4;
    let rope = RotaryPositionEmbedding::new(head_dim, 64);
    let data = vec![1.0, 2.0, 3.0, 4.0f32];
    let x = Tensor::new(&data, &[1, 1, 1, head_dim]);
    let y = rope.apply(&x, &[0]);
    let y_data = y.data();
    for (i, (expected, actual)) in data.iter().zip(y_data.iter()).enumerate() {
        let diff = (expected - actual).abs();
        assert!(
            diff < 1e-5,
            "RoPE(x, 0)[{i}]={}, expected {}",
            actual,
            expected
        );
    }
}