use aprender::autograd::Tensor;
use proptest::prelude::*;
proptest! {
#[test]
// Property: the product of an (m×p) matrix and a (p×n) matrix has shape (m×n).
fn prop_output_shape_correctness(
    m in 1usize..8,
    p in 1usize..8,
    n in 1usize..8
) {
    // Deterministic fill values — only the shapes matter for this property.
    let lhs: Vec<f32> = (0..m * p).map(|i| (i as f32) * 0.1).collect();
    let rhs: Vec<f32> = (0..p * n).map(|i| (i as f32) * 0.1).collect();
    let product = Tensor::new(&lhs, &[m, p]).matmul(&Tensor::new(&rhs, &[p, n]));
    let dims = product.shape();
    prop_assert_eq!(dims[0], m, "expected rows={}, got {}", m, dims[0]);
    prop_assert_eq!(dims[1], n, "expected cols={}, got {}", n, dims[1]);
}
#[test]
// Property: matrix multiplication is associative, (AB)C == A(BC), up to
// f32 rounding, for A: 3×5, B: 5×3, C: 3×2.
fn prop_matmul_associativity(
    // Exactly 3*5 + 5*3 + 3*2 = 36 values are consumed below, so request
    // exactly that many (the original asked for 75 and discarded 39 per case).
    data in proptest::collection::vec(-5.0f32..5.0, 36..37usize)
) {
    let m = 3;
    let p = 5;
    let n = 3;
    let q = 2;
    let total = m * p + p * n + n * q; // 36
    // Defensive guard; with the 36..37 strategy above this never fires.
    if data.len() < total { return Ok(()); }
    let a = Tensor::new(&data[..m * p], &[m, p]);
    let b = Tensor::new(&data[m * p..m * p + p * n], &[p, n]);
    let c = Tensor::new(&data[m * p + p * n..total], &[n, q]);
    // Left association: (A·B)·C
    let left = a.matmul(&b).matmul(&c);
    // Right association: A·(B·C)
    let right = a.matmul(&b.matmul(&c));
    let left_data = left.data();
    let right_data = right.data();
    prop_assert_eq!(left_data.len(), right_data.len());
    // The two association orders accumulate f32 rounding differently;
    // compare elementwise with a loose absolute tolerance.
    for i in 0..left_data.len() {
        let diff = (left_data[i] - right_data[i]).abs();
        prop_assert!(
            diff < 1e-2,
            "associativity: |(AB)C - A(BC)|[{i}]={diff}"
        );
    }
}
#[test]
// Property: matrix multiplication distributes over addition,
// A(B+C) == AB + AC, up to f32 rounding, for random 3×3 matrices.
fn prop_matmul_distributes(
    // Exactly 3 * (3*3) = 27 values are consumed below, so request exactly
    // that many (the original asked for 54 and discarded half per case).
    data in proptest::collection::vec(-5.0f32..5.0, 27..28usize)
) {
    let n = 3;
    let sz = n * n;
    // Defensive guard; with the 27..28 strategy above this never fires.
    if data.len() < 3 * sz { return Ok(()); }
    let a = Tensor::new(&data[..sz], &[n, n]);
    let b = Tensor::new(&data[sz..2 * sz], &[n, n]);
    let c = Tensor::new(&data[2 * sz..3 * sz], &[n, n]);
    // Left-hand side: A·(B + C), with B + C formed elementwise.
    let bc_sum_data: Vec<f32> = b.data().iter()
        .zip(c.data().iter())
        .map(|(x, y)| x + y)
        .collect();
    let left = a.matmul(&Tensor::new(&bc_sum_data, &[n, n]));
    // Right-hand side: A·B + A·C, summed elementwise.
    let ab = a.matmul(&b);
    let ac = a.matmul(&c);
    let right_data: Vec<f32> = ab.data().iter()
        .zip(ac.data().iter())
        .map(|(x, y)| x + y)
        .collect();
    let left_data = left.data();
    // Loose absolute tolerance to absorb f32 accumulation differences.
    for i in 0..left_data.len() {
        let diff = (left_data[i] - right_data[i]).abs();
        prop_assert!(
            diff < 1e-2,
            "distributivity: |A(B+C) - AB - AC|[{i}]={diff}"
        );
    }
}
// Placeholder: SIMD-vs-scalar equivalence is out of scope here (per the
// ignore reason, it belongs to the `trueno` domain). Kept ignored so the
// intended property stays visible in the test inventory.
#[test]
#[ignore = "SIMD equivalence — trueno domain"]
fn prop_simd_matches_scalar(
_x in proptest::collection::vec(-5.0f32..5.0, 1..32usize)
) {
}
// Placeholder: quantized dot-product error bound cannot be tested yet
// (per the ignore reason, the feature is not implemented). Kept ignored
// so the intended property stays visible in the test inventory.
#[test]
#[ignore = "Quantized dot product not yet implemented"]
fn prop_quantized_error_bounded(
_x in proptest::collection::vec(-5.0f32..5.0, 1..32usize)
) {
}
}
#[test]
// A · I == A: right-multiplying by the 2×2 identity leaves A unchanged.
fn matmul_identity_right() {
    let a = Tensor::new(&[1.0, 2.0, 3.0, 4.0f32], &[2, 2]);
    let identity = Tensor::new(&[1.0, 0.0, 0.0, 1.0f32], &[2, 2]);
    let result = a.matmul(&identity);
    let expected = [1.0f32, 2.0, 3.0, 4.0];
    for (got, want) in result.data().iter().zip(expected.iter()) {
        assert!((got - want).abs() < 1e-6);
    }
}
#[test]
// I · A == A: left-multiplying by the 2×2 identity leaves A unchanged.
fn matmul_identity_left() {
    let a = Tensor::new(&[1.0, 2.0, 3.0, 4.0f32], &[2, 2]);
    let identity = Tensor::new(&[1.0, 0.0, 0.0, 1.0f32], &[2, 2]);
    let result = identity.matmul(&a);
    let expected = [1.0f32, 2.0, 3.0, 4.0];
    for (got, want) in result.data().iter().zip(expected.iter()) {
        assert!((got - want).abs() < 1e-6);
    }
}