// Tests for the Linear layer: construction, shapes, readiness, and linearity properties.
use super::*;
#[test]
fn test_linear_forward_shape() {
let layer = Linear::new(10, 5);
let x = Tensor::ones(&[32, 10]);
let output = layer.forward(&x);
assert_eq!(output.shape(), &[32, 5]);
}
#[test]
fn test_linear_parameters() {
let layer = Linear::new(10, 5);
let params = layer.parameters();
assert_eq!(params.len(), 2);
assert_eq!(params[0].shape(), &[5, 10]);
assert_eq!(params[1].shape(), &[5]);
}
#[test]
fn test_linear_without_bias() {
let layer = Linear::without_bias(10, 5);
let params = layer.parameters();
assert_eq!(params.len(), 1);
assert!(!layer.has_bias());
}
#[test]
fn test_linear_num_parameters() {
let layer = Linear::new(10, 5);
assert_eq!(layer.num_parameters(), 55);
}
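// A hedged companion check, assuming `num_parameters` sums the element counts
// of whatever `parameters()` exposes: without a bias, only the [5, 10] weight
// should be counted.
#[test]
fn test_linear_num_parameters_without_bias() {
let layer = Linear::without_bias(10, 5);
assert_eq!(layer.num_parameters(), 50);
}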
#[test]
fn test_linear_reproducible() {
let layer1 = Linear::with_seed(10, 5, Some(42));
let layer2 = Linear::with_seed(10, 5, Some(42));
assert_eq!(layer1.weight().data(), layer2.weight().data());
}
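// A hedged sketch of the converse, assuming distinct seeds drive distinct RNG
// streams: differently seeded layers should not end up with identical weights
// (true with overwhelming probability for any reasonable initializer).
#[test]
fn test_linear_different_seeds_differ() {
let layer1 = Linear::with_seed(10, 5, Some(42));
let layer2 = Linear::with_seed(10, 5, Some(43));
assert_ne!(layer1.weight().data(), layer2.weight().data());
}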
#[test]
fn test_linear_identity_like() {
let mut layer = Linear::with_seed(3, 3, Some(42));
let identity = Tensor::new(&[1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0], &[3, 3]);
let zero_bias = Tensor::zeros(&[3]);
layer.set_weight(identity.requires_grad());
layer.set_bias(zero_bias.requires_grad());
let x = Tensor::new(&[1.0, 2.0, 3.0], &[1, 3]);
let output = layer.forward(&x);
assert_eq!(output.shape(), &[1, 3]);
let out_data = output.data();
assert!((out_data[0] - 1.0).abs() < 1e-5);
assert!((out_data[1] - 2.0).abs() < 1e-5);
assert!((out_data[2] - 3.0).abs() < 1e-5);
}
#[test]
fn test_linear_with_bias() {
let mut layer = Linear::with_seed(2, 2, Some(42));
layer.set_weight(Tensor::new(&[1.0, 0.0, 0.0, 1.0], &[2, 2]).requires_grad());
layer.set_bias(Tensor::new(&[10.0, 20.0], &[2]).requires_grad());
let x = Tensor::new(&[1.0, 2.0], &[1, 2]);
let output = layer.forward(&x);
let out_data = output.data();
assert!((out_data[0] - 11.0).abs() < 1e-5);
assert!((out_data[1] - 22.0).abs() < 1e-5);
}
#[test]
fn test_placeholder_is_not_ready() {
let layer = Linear::placeholder(64, 128);
assert!(!layer.is_ready(), "Placeholder must not be ready");
}
#[test]
fn test_new_is_ready() {
let layer = Linear::new(64, 128);
assert!(layer.is_ready(), "Linear::new() must be ready");
}
#[test]
fn test_set_weight_makes_ready() {
let mut layer = Linear::placeholder(32, 64);
assert!(!layer.is_ready(), "Precondition");
let weight = Tensor::ones(&[64, 32]);
layer.set_weight(weight);
assert!(layer.is_ready(), "set_weight must make layer ready");
}
#[test]
fn test_is_ready_implies_forward_succeeds() {
let layer = Linear::new(8, 4);
assert!(layer.is_ready());
let x = Tensor::ones(&[2, 8]);
let output = layer.forward(&x);
assert_eq!(output.shape(), &[2, 4]);
}
#[test]
#[should_panic(expected = "weight_t")]
fn test_not_ready_forward_panics() {
let layer = Linear::placeholder(8, 4);
assert!(!layer.is_ready());
let x = Tensor::ones(&[2, 8]);
let _ = layer.forward(&x);
}
#[test]
fn test_linear_debug_with_bias() {
let layer = Linear::new(10, 5);
let debug_str = format!("{:?}", layer);
assert!(debug_str.contains("Linear"));
assert!(debug_str.contains("in_features"));
assert!(debug_str.contains("out_features"));
assert!(debug_str.contains("bias"));
assert!(debug_str.contains("10"));
assert!(debug_str.contains("5"));
assert!(debug_str.contains("true"));
}
#[test]
fn test_linear_debug_without_bias() {
let layer = Linear::without_bias(8, 4);
let debug_str = format!("{:?}", layer);
assert!(debug_str.contains("false"));
}
#[test]
fn test_linear_forward_3d_input() {
let layer = Linear::with_seed(4, 3, Some(42));
let x = Tensor::ones(&[2, 3, 4]);
let output = layer.forward(&x);
assert_eq!(output.shape(), &[2, 3, 3]);
}
#[test]
fn test_linear_forward_4d_input() {
let layer = Linear::with_seed(2, 3, Some(42));
let x = Tensor::ones(&[2, 2, 2, 2]);
let output = layer.forward(&x);
assert_eq!(output.shape(), &[2, 2, 2, 3]);
}
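// A hedged consistency sketch, assuming N-D inputs are treated as a flat batch
// of rows over the last axis (which the 3D/4D shape tests above suggest):
// forwarding [2, 3, 4] ones should produce the same values as forwarding the
// equivalent flattened [6, 4] batch.
#[test]
fn test_linear_forward_nd_matches_flattened() {
let layer = Linear::with_seed(4, 3, Some(42));
let y_nd = layer.forward(&Tensor::ones(&[2, 3, 4]));
let y_2d = layer.forward(&Tensor::ones(&[6, 4]));
assert_eq!(y_nd.data(), y_2d.data());
}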
#[test]
fn test_linear_refresh_caches() {
let mut layer = Linear::new(4, 3);
layer.refresh_caches();
assert!(layer.is_ready());
let x = Tensor::ones(&[1, 4]);
let output = layer.forward(&x);
assert_eq!(output.shape(), &[1, 3]);
}
#[test]
fn test_linear_parameters_mut_with_bias() {
let mut layer = Linear::new(4, 3);
let params = layer.parameters_mut();
assert_eq!(params.len(), 2);
}
#[test]
fn test_linear_parameters_mut_without_bias() {
let mut layer = Linear::without_bias(4, 3);
let params = layer.parameters_mut();
assert_eq!(params.len(), 1);
}
#[test]
fn test_linear_forward_without_bias_computation() {
let mut layer = Linear::without_bias_with_seed(2, 2, Some(42));
layer.set_weight(Tensor::new(&[1.0, 0.0, 0.0, 1.0], &[2, 2]).requires_grad());
let x = Tensor::new(&[3.0, 7.0], &[1, 2]);
let output = layer.forward(&x);
let out_data = output.data();
assert!((out_data[0] - 3.0).abs() < 1e-5);
assert!((out_data[1] - 7.0).abs() < 1e-5);
}
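// A hedged sketch, assuming the weight is stored as [out_features, in_features]
// (as the parameter-shape test above indicates): a row-swap weight should
// permute the two inputs. The swap matrix is symmetric, so the expectation
// holds whether forward computes x·W or x·Wᵀ.
#[test]
fn test_linear_forward_swap_weight() {
let mut layer = Linear::without_bias_with_seed(2, 2, Some(42));
layer.set_weight(Tensor::new(&[0.0, 1.0, 1.0, 0.0], &[2, 2]).requires_grad());
let x = Tensor::new(&[3.0, 7.0], &[1, 2]);
let output = layer.forward(&x);
let out_data = output.data();
assert!((out_data[0] - 7.0).abs() < 1e-5);
assert!((out_data[1] - 3.0).abs() < 1e-5);
}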
#[test]
fn test_placeholder_accessors() {
let layer = Linear::placeholder(16, 8);
assert_eq!(layer.in_features(), 16);
assert_eq!(layer.out_features(), 8);
assert!(!layer.has_bias());
assert!(layer.bias().is_none());
assert_eq!(layer.weight().shape(), &[1]);
}
#[test]
fn falsify_lp_001_output_shape() {
for &(batch, d_in, d_out) in &[(1, 4, 8), (16, 32, 16), (64, 128, 64), (1, 1, 1)] {
let layer = Linear::new(d_in, d_out);
let x = Tensor::ones(&[batch, d_in]);
let y = layer.forward(&x);
assert_eq!(
y.shape(),
&[batch, d_out],
"FALSIFIED LP-001: output shape {:?}, expected [{batch}, {d_out}]",
y.shape()
);
}
}
#[test]
fn falsify_lp_002_homogeneity_no_bias() {
let layer = Linear::without_bias(4, 3);
let x = Tensor::new(&[1.0, -2.0, 3.0, -0.5, 0.5, 1.5, -1.0, 2.0], &[2, 4]);
let y_base = layer.forward(&x);
for &alpha in &[2.0_f32, 0.5, -1.0, -3.0, 0.1] {
let scaled_data: Vec<f32> = x.data().iter().map(|&v| v * alpha).collect();
let x_scaled = Tensor::new(&scaled_data, x.shape());
let y_scaled = layer.forward(&x_scaled);
for (i, (&ys, &yb)) in y_scaled.data().iter().zip(y_base.data().iter()).enumerate() {
let expected = alpha * yb;
let diff = (ys - expected).abs();
let tol = 1e-3 * expected.abs().max(1.0);
assert!(
diff < tol,
"FALSIFIED LP-002: f({alpha}*x)[{i}] = {ys}, expected {expected}, diff = {diff}"
);
}
}
}
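// A hedged sketch of the additivity half of linearity, complementing the
// homogeneity check above: for a bias-free layer, f(x1 + x2) should equal
// f(x1) + f(x2) up to float tolerance.
#[test]
fn falsify_additivity_no_bias() {
let layer = Linear::without_bias(4, 3);
let x1 = Tensor::new(&[1.0, -2.0, 3.0, -0.5], &[1, 4]);
let x2 = Tensor::new(&[0.5, 1.5, -1.0, 2.0], &[1, 4]);
let sum_data: Vec<f32> = x1.data().iter().zip(x2.data().iter()).map(|(&a, &b)| a + b).collect();
let x_sum = Tensor::new(&sum_data, &[1, 4]);
let y_sum = layer.forward(&x_sum);
let y1 = layer.forward(&x1);
let y2 = layer.forward(&x2);
for (i, &ys) in y_sum.data().iter().enumerate() {
let expected = y1.data()[i] + y2.data()[i];
let diff = (ys - expected).abs();
assert!(diff < 1e-4, "additivity violated at [{i}]: {ys} vs {expected}");
}
}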
#[test]
fn falsify_lp_004_zero_input_produces_bias() {
let layer = Linear::new(4, 3);
let x = Tensor::zeros(&[2, 4]);
let y = layer.forward(&x);
let bias = layer.bias().expect("layer has bias");
let bias_data = bias.data();
for row in 0..2 {
for col in 0..3 {
let out_val = y.data()[row * 3 + col];
let bias_val = bias_data[col];
let diff = (out_val - bias_val).abs();
assert!(
diff < 1e-5,
"FALSIFIED LP-004: f(0)[{row}][{col}] = {out_val}, expected bias = {bias_val}"
);
}
}
}
#[test]
fn falsify_lp_002b_zero_preservation_no_bias() {
let layer = Linear::without_bias(4, 3);
let x = Tensor::zeros(&[2, 4]);
let y = layer.forward(&x);
for (i, &val) in y.data().iter().enumerate() {
assert!(
val.abs() < 1e-6,
"FALSIFIED LP-002: f_no_bias(0)[{i}] = {val}, expected 0"
);
}
}