use aprender::autograd::Tensor;
use aprender::nn::Conv1d;
use aprender::nn::Module;
use proptest::prelude::*;
proptest! {
    /// The output length must follow the standard convolution arithmetic:
    /// L_out = (L_in + 2 * padding - kernel) / stride + 1 (floor division).
    /// Batch and out-channel dimensions must also be preserved/derived.
    #[test]
    fn prop_output_shape(
        length in 10usize..32,
        kernel in 1usize..5
    ) {
        let in_ch = 2;
        let out_ch = 3;
        let stride = 1;
        let padding = 0;
        // Standard Conv1d output-length formula; with length >= 10 and
        // kernel < 5 the subtraction cannot underflow.
        let expected_l_out = (length + 2 * padding - kernel) / stride + 1;
        let conv = Conv1d::new(in_ch, out_ch, kernel);
        let data: Vec<f32> = (0..in_ch * length).map(|i| (i as f32) * 0.01).collect();
        let x = Tensor::new(&data, &[1, in_ch, length]);
        let y = conv.forward(&x);
        let shape = y.shape();
        prop_assert_eq!(shape[0], 1); // batch dimension preserved
        prop_assert_eq!(shape[1], out_ch);
        prop_assert_eq!(shape[2], expected_l_out);
    }

    /// Convolution without bias is a linear operator:
    /// conv(a*x + b*z) == a*conv(x) + b*conv(z).
    /// Bias is disabled via `with_options` because adding a bias makes the
    /// map affine, not linear, and the identity would no longer hold.
    #[test]
    fn prop_linearity(
        a_val in -2.0f32..2.0,
        b_val in -2.0f32..2.0
    ) {
        let in_ch = 1;
        let out_ch = 1;
        let kernel = 3;
        let length = 8;
        // (in_ch, out_ch, kernel, stride, padding, bias=false)
        let conv = Conv1d::with_options(in_ch, out_ch, kernel, 1, 0, false);
        let x: Vec<f32> = (0..in_ch * length).map(|i| (i as f32) * 0.1).collect();
        let z: Vec<f32> = (0..in_ch * length).map(|i| ((i as f32) * 0.1).sin()).collect();
        let combo: Vec<f32> = x.iter().zip(z.iter())
            .map(|(&xi, &zi)| a_val * xi + b_val * zi)
            .collect();
        let x_t = Tensor::new(&x, &[1, in_ch, length]);
        let z_t = Tensor::new(&z, &[1, in_ch, length]);
        let combo_t = Tensor::new(&combo, &[1, in_ch, length]);
        let y_combo = conv.forward(&combo_t);
        let y_x = conv.forward(&x_t);
        let y_z = conv.forward(&z_t);
        let combo_data = y_combo.data();
        let x_data = y_x.data();
        let z_data = y_z.data();
        for i in 0..combo_data.len() {
            let expected = a_val * x_data[i] + b_val * z_data[i];
            let diff = (combo_data[i] - expected).abs();
            // Loose absolute tolerance: inputs/coefficients are O(1), so
            // f32 accumulation error stays far below 1e-2.
            prop_assert!(
                diff < 1e-2,
                "linearity: y_combo[{}]={} vs expected={}, diff={}",
                i, combo_data[i], expected, diff
            );
        }
    }

    /// Finite inputs must produce finite outputs — no NaN or Inf may leak
    /// out of the forward pass.
    #[test]
    fn prop_output_bounded(
        data in proptest::collection::vec(-1.0f32..1.0, 16..17usize)
    ) {
        let in_ch = 1;
        let out_ch = 1;
        let kernel = 3;
        let length = 16;
        // The strategy yields exactly `length` elements; resize is a
        // defensive no-op that guarantees the buffer matches the tensor
        // shape (pads with zeros / truncates if the strategy ever changes).
        let mut data = data;
        data.resize(in_ch * length, 0.0);
        let conv = Conv1d::new(in_ch, out_ch, kernel);
        let x = Tensor::new(&data, &[1, in_ch, length]);
        let y = conv.forward(&x);
        for &val in y.data() {
            prop_assert!(
                val.is_finite(),
                "output is not finite"
            );
        }
    }

    /// With kernel size 1, stride 1, padding 0, and no bias, each output
    /// element is exactly w * x[i]: the output must be proportional to the
    /// input (and the length preserved). Proportionality is checked as
    /// y[i] * x[j] == y[j] * x[i], which avoids dividing by zero inputs.
    #[test]
    fn prop_kernel_size_1_is_linear(
        data in proptest::collection::vec(-5.0f32..5.0, 8..9usize)
    ) {
        let in_ch = 1;
        let out_ch = 1;
        let length = 8;
        // Strategy yields exactly `length` elements; resize keeps the
        // buffer/shape contract explicit.
        let mut data = data;
        data.resize(in_ch * length, 0.0);
        // Disable bias so the map is strictly linear, not affine.
        let conv = Conv1d::with_options(in_ch, out_ch, 1, 1, 0, false);
        let x = Tensor::new(&data, &[1, in_ch, length]);
        let y = conv.forward(&x);
        prop_assert_eq!(y.shape()[2], length);
        let y_data = y.data();
        for i in 0..length {
            for j in 0..length {
                let lhs = y_data[i] * data[j];
                let rhs = y_data[j] * data[i];
                // Both sides equal w * x[i] * x[j] up to f32 rounding;
                // values are O(25) at most, so 1e-3 absolute is generous.
                prop_assert!(
                    (lhs - rhs).abs() < 1e-3,
                    "kernel-1 proportionality violated at ({}, {}): {} vs {}",
                    i, j, lhs, rhs
                );
            }
        }
    }

    /// Placeholder: scalar-vs-SIMD equivalence belongs to the trueno
    /// backend's own test suite; kept here (ignored) to document the gap.
    #[test]
    #[ignore = "SIMD equivalence — trueno domain"]
    fn prop_simd_equivalence(
        _x in proptest::collection::vec(-5.0f32..5.0, 1..32usize)
    ) {
    }
}